/* SPDX-License-Identifier: BSD-2-Clause */

/*
 * Cobham Gaisler GRSPW/GRSPW2 SpaceWire Kernel Library Interface for RTEMS.
 *
 * This driver can be used to implement a standard I/O system "char"-driver
 * or used directly.
 *
 * COPYRIGHT (c) 2011
 * Cobham Gaisler AB
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
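
/* Usage sketch (illustration only, not part of the driver): a minimal
 * sequence for using the kernel library directly, assuming device 0 exists
 * and the driver has been initialized by the driver manager:
 *
 *   void *dev = grspw_open(0);
 *   if (dev != NULL) {
 *       ...                 // configure link, open DMA channels, transfer
 *       grspw_close(dev);   // returns non-zero if DMA channels still open
 *   }
 */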

#include <rtems.h>
#include <bsp.h>
#include <rtems/libio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <ctype.h>
#include <rtems/bspIo.h>

#include <drvmgr/drvmgr.h>
#include <grlib/ambapp.h>
#include <grlib/ambapp_bus.h>
#include <grlib/grspw_pkt.h>

#include <grlib/grlib_impl.h>

/*#define STATIC*/
#define STATIC static

/*#define GRSPW_DBG(args...) printk(args)*/
#define GRSPW_DBG(args...)

struct grspw_dma_regs {
    volatile unsigned int ctrl; /* DMA Channel Control */
    volatile unsigned int rxmax;    /* RX Max Packet Length */
    volatile unsigned int txdesc;   /* TX Descriptor Base/Current */
    volatile unsigned int rxdesc;   /* RX Descriptor Base/Current */
    volatile unsigned int addr; /* Address Register */
    volatile unsigned int resv[3];
};

struct grspw_regs {
    volatile unsigned int ctrl;
    volatile unsigned int status;
    volatile unsigned int nodeaddr;
    volatile unsigned int clkdiv;
    volatile unsigned int destkey;
    volatile unsigned int time;
    volatile unsigned int timer;    /* Used only in GRSPW1 */
    volatile unsigned int resv1;
    /* DMA Registers; ctrl.NCH gives the number of DMA channels,
     * up to 4 channels are supported
     */
    struct grspw_dma_regs dma[4];

    volatile unsigned int icctrl;
    volatile unsigned int icrx;
    volatile unsigned int icack;
    volatile unsigned int ictimeout;
    volatile unsigned int ictickomask;
    volatile unsigned int icaamask;
    volatile unsigned int icrlpresc;
    volatile unsigned int icrlisr;
    volatile unsigned int icrlintack;
    volatile unsigned int resv2;
    volatile unsigned int icisr;
    volatile unsigned int resv3;
};

/* GRSPW - Control Register - 0x00 */
#define GRSPW_CTRL_RA_BIT   31
#define GRSPW_CTRL_RX_BIT   30
#define GRSPW_CTRL_RC_BIT   29
#define GRSPW_CTRL_NCH_BIT  27
#define GRSPW_CTRL_PO_BIT   26
#define GRSPW_CTRL_CC_BIT   25
#define GRSPW_CTRL_ID_BIT   24
#define GRSPW_CTRL_LE_BIT   22
#define GRSPW_CTRL_PS_BIT   21
#define GRSPW_CTRL_NP_BIT   20
#define GRSPW_CTRL_RD_BIT   17
#define GRSPW_CTRL_RE_BIT   16
#define GRSPW_CTRL_TF_BIT   12
#define GRSPW_CTRL_TR_BIT   11
#define GRSPW_CTRL_TT_BIT   10
#define GRSPW_CTRL_LI_BIT   9
#define GRSPW_CTRL_TQ_BIT   8
#define GRSPW_CTRL_RS_BIT   6
#define GRSPW_CTRL_PM_BIT   5
#define GRSPW_CTRL_TI_BIT   4
#define GRSPW_CTRL_IE_BIT   3
#define GRSPW_CTRL_AS_BIT   2
#define GRSPW_CTRL_LS_BIT   1
#define GRSPW_CTRL_LD_BIT   0

#define GRSPW_CTRL_RA   (1<<GRSPW_CTRL_RA_BIT)
#define GRSPW_CTRL_RX   (1<<GRSPW_CTRL_RX_BIT)
#define GRSPW_CTRL_RC   (1<<GRSPW_CTRL_RC_BIT)
#define GRSPW_CTRL_NCH  (0x3<<GRSPW_CTRL_NCH_BIT)
#define GRSPW_CTRL_PO   (1<<GRSPW_CTRL_PO_BIT)
#define GRSPW_CTRL_CC   (1<<GRSPW_CTRL_CC_BIT)
#define GRSPW_CTRL_ID   (1<<GRSPW_CTRL_ID_BIT)
#define GRSPW_CTRL_LE   (1<<GRSPW_CTRL_LE_BIT)
#define GRSPW_CTRL_PS   (1<<GRSPW_CTRL_PS_BIT)
#define GRSPW_CTRL_NP   (1<<GRSPW_CTRL_NP_BIT)
#define GRSPW_CTRL_RD   (1<<GRSPW_CTRL_RD_BIT)
#define GRSPW_CTRL_RE   (1<<GRSPW_CTRL_RE_BIT)
#define GRSPW_CTRL_TF   (1<<GRSPW_CTRL_TF_BIT)
#define GRSPW_CTRL_TR   (1<<GRSPW_CTRL_TR_BIT)
#define GRSPW_CTRL_TT   (1<<GRSPW_CTRL_TT_BIT)
#define GRSPW_CTRL_LI   (1<<GRSPW_CTRL_LI_BIT)
#define GRSPW_CTRL_TQ   (1<<GRSPW_CTRL_TQ_BIT)
#define GRSPW_CTRL_RS   (1<<GRSPW_CTRL_RS_BIT)
#define GRSPW_CTRL_PM   (1<<GRSPW_CTRL_PM_BIT)
#define GRSPW_CTRL_TI   (1<<GRSPW_CTRL_TI_BIT)
#define GRSPW_CTRL_IE   (1<<GRSPW_CTRL_IE_BIT)
#define GRSPW_CTRL_AS   (1<<GRSPW_CTRL_AS_BIT)
#define GRSPW_CTRL_LS   (1<<GRSPW_CTRL_LS_BIT)
#define GRSPW_CTRL_LD   (1<<GRSPW_CTRL_LD_BIT)

#define GRSPW_CTRL_IRQSRC_MASK \
    (GRSPW_CTRL_LI | GRSPW_CTRL_TQ)
#define GRSPW_ICCTRL_IRQSRC_MASK \
    (GRSPW_ICCTRL_TQ | GRSPW_ICCTRL_AQ | GRSPW_ICCTRL_IQ)


/* GRSPW - Status Register - 0x04 */
#define GRSPW_STS_LS_BIT    21
#define GRSPW_STS_AP_BIT    9
#define GRSPW_STS_EE_BIT    8
#define GRSPW_STS_IA_BIT    7
#define GRSPW_STS_WE_BIT    6   /* GRSPW1 */
#define GRSPW_STS_PE_BIT    4
#define GRSPW_STS_DE_BIT    3
#define GRSPW_STS_ER_BIT    2
#define GRSPW_STS_CE_BIT    1
#define GRSPW_STS_TO_BIT    0

#define GRSPW_STS_LS    (0x7<<GRSPW_STS_LS_BIT)
#define GRSPW_STS_AP    (1<<GRSPW_STS_AP_BIT)
#define GRSPW_STS_EE    (1<<GRSPW_STS_EE_BIT)
#define GRSPW_STS_IA    (1<<GRSPW_STS_IA_BIT)
#define GRSPW_STS_WE    (1<<GRSPW_STS_WE_BIT)   /* GRSPW1 */
#define GRSPW_STS_PE    (1<<GRSPW_STS_PE_BIT)
#define GRSPW_STS_DE    (1<<GRSPW_STS_DE_BIT)
#define GRSPW_STS_ER    (1<<GRSPW_STS_ER_BIT)
#define GRSPW_STS_CE    (1<<GRSPW_STS_CE_BIT)
#define GRSPW_STS_TO    (1<<GRSPW_STS_TO_BIT)

/* GRSPW - Default Address Register - 0x08 */
#define GRSPW_DEF_ADDR_BIT  0
#define GRSPW_DEF_MASK_BIT  8
#define GRSPW_DEF_ADDR  (0xff<<GRSPW_DEF_ADDR_BIT)
#define GRSPW_DEF_MASK  (0xff<<GRSPW_DEF_MASK_BIT)

/* GRSPW - Clock Divisor Register - 0x0C */
#define GRSPW_CLKDIV_START_BIT  8
#define GRSPW_CLKDIV_RUN_BIT    0
#define GRSPW_CLKDIV_START  (0xff<<GRSPW_CLKDIV_START_BIT)
#define GRSPW_CLKDIV_RUN    (0xff<<GRSPW_CLKDIV_RUN_BIT)
#define GRSPW_CLKDIV_MASK   (GRSPW_CLKDIV_START|GRSPW_CLKDIV_RUN)

/* GRSPW - Destination key Register - 0x10 */
#define GRSPW_DK_DESTKEY_BIT    0
#define GRSPW_DK_DESTKEY    (0xff<<GRSPW_DK_DESTKEY_BIT)

/* GRSPW - Time Register - 0x14 */
#define GRSPW_TIME_CTRL_BIT 6
#define GRSPW_TIME_CNT_BIT  0
#define GRSPW_TIME_CTRL     (0x3<<GRSPW_TIME_CTRL_BIT)
#define GRSPW_TIME_TCNT     (0x3f<<GRSPW_TIME_CNT_BIT)

/* GRSPW - DMA Control Register - 0x20*N */
#define GRSPW_DMACTRL_LE_BIT    16
#define GRSPW_DMACTRL_SP_BIT    15
#define GRSPW_DMACTRL_SA_BIT    14
#define GRSPW_DMACTRL_EN_BIT    13
#define GRSPW_DMACTRL_NS_BIT    12
#define GRSPW_DMACTRL_RD_BIT    11
#define GRSPW_DMACTRL_RX_BIT    10
#define GRSPW_DMACTRL_AT_BIT    9
#define GRSPW_DMACTRL_RA_BIT    8
#define GRSPW_DMACTRL_TA_BIT    7
#define GRSPW_DMACTRL_PR_BIT    6
#define GRSPW_DMACTRL_PS_BIT    5
#define GRSPW_DMACTRL_AI_BIT    4
#define GRSPW_DMACTRL_RI_BIT    3
#define GRSPW_DMACTRL_TI_BIT    2
#define GRSPW_DMACTRL_RE_BIT    1
#define GRSPW_DMACTRL_TE_BIT    0

#define GRSPW_DMACTRL_LE    (1<<GRSPW_DMACTRL_LE_BIT)
#define GRSPW_DMACTRL_SP    (1<<GRSPW_DMACTRL_SP_BIT)
#define GRSPW_DMACTRL_SA    (1<<GRSPW_DMACTRL_SA_BIT)
#define GRSPW_DMACTRL_EN    (1<<GRSPW_DMACTRL_EN_BIT)
#define GRSPW_DMACTRL_NS    (1<<GRSPW_DMACTRL_NS_BIT)
#define GRSPW_DMACTRL_RD    (1<<GRSPW_DMACTRL_RD_BIT)
#define GRSPW_DMACTRL_RX    (1<<GRSPW_DMACTRL_RX_BIT)
#define GRSPW_DMACTRL_AT    (1<<GRSPW_DMACTRL_AT_BIT)
#define GRSPW_DMACTRL_RA    (1<<GRSPW_DMACTRL_RA_BIT)
#define GRSPW_DMACTRL_TA    (1<<GRSPW_DMACTRL_TA_BIT)
#define GRSPW_DMACTRL_PR    (1<<GRSPW_DMACTRL_PR_BIT)
#define GRSPW_DMACTRL_PS    (1<<GRSPW_DMACTRL_PS_BIT)
#define GRSPW_DMACTRL_AI    (1<<GRSPW_DMACTRL_AI_BIT)
#define GRSPW_DMACTRL_RI    (1<<GRSPW_DMACTRL_RI_BIT)
#define GRSPW_DMACTRL_TI    (1<<GRSPW_DMACTRL_TI_BIT)
#define GRSPW_DMACTRL_RE    (1<<GRSPW_DMACTRL_RE_BIT)
#define GRSPW_DMACTRL_TE    (1<<GRSPW_DMACTRL_TE_BIT)

/* GRSPW - DMA Channel Max Packet Length Register - (0x20*N + 0x04) */
#define GRSPW_DMARXLEN_MAX_BIT  0
#define GRSPW_DMARXLEN_MAX  (0xffffff<<GRSPW_DMARXLEN_MAX_BIT)

/* GRSPW - DMA Channel Address Register - (0x20*N + 0x10) */
#define GRSPW_DMAADR_ADDR_BIT   0
#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR   (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK   (0xff<<GRSPW_DMAADR_MASK_BIT)

/* GRSPW - Interrupt code receive register - 0xa4 */
#define GRSPW_ICCTRL_INUM_BIT   27
#define GRSPW_ICCTRL_IA_BIT 24
#define GRSPW_ICCTRL_LE_BIT 23
#define GRSPW_ICCTRL_PR_BIT 22
#define GRSPW_ICCTRL_DQ_BIT 21 /* never used */
#define GRSPW_ICCTRL_TQ_BIT 20
#define GRSPW_ICCTRL_AQ_BIT 19
#define GRSPW_ICCTRL_IQ_BIT 18
#define GRSPW_ICCTRL_IR_BIT 17
#define GRSPW_ICCTRL_IT_BIT 16
#define GRSPW_ICCTRL_NUMI_BIT   13
#define GRSPW_ICCTRL_BIRQ_BIT   8
#define GRSPW_ICCTRL_ID_BIT 7
#define GRSPW_ICCTRL_II_BIT 6
#define GRSPW_ICCTRL_TXIRQ_BIT  0
#define GRSPW_ICCTRL_INUM   (0x1f << GRSPW_ICCTRL_INUM_BIT)
#define GRSPW_ICCTRL_IA     (1 << GRSPW_ICCTRL_IA_BIT)
#define GRSPW_ICCTRL_LE     (1 << GRSPW_ICCTRL_LE_BIT)
#define GRSPW_ICCTRL_PR     (1 << GRSPW_ICCTRL_PR_BIT)
#define GRSPW_ICCTRL_DQ     (1 << GRSPW_ICCTRL_DQ_BIT)
#define GRSPW_ICCTRL_TQ     (1 << GRSPW_ICCTRL_TQ_BIT)
#define GRSPW_ICCTRL_AQ     (1 << GRSPW_ICCTRL_AQ_BIT)
#define GRSPW_ICCTRL_IQ     (1 << GRSPW_ICCTRL_IQ_BIT)
#define GRSPW_ICCTRL_IR     (1 << GRSPW_ICCTRL_IR_BIT)
#define GRSPW_ICCTRL_IT     (1 << GRSPW_ICCTRL_IT_BIT)
#define GRSPW_ICCTRL_NUMI   (0x7 << GRSPW_ICCTRL_NUMI_BIT)
#define GRSPW_ICCTRL_BIRQ   (0x1f << GRSPW_ICCTRL_BIRQ_BIT)
#define GRSPW_ICCTRL_ID     (1 << GRSPW_ICCTRL_ID_BIT)
#define GRSPW_ICCTRL_II     (1 << GRSPW_ICCTRL_II_BIT)
#define GRSPW_ICCTRL_TXIRQ  (0x3f << GRSPW_ICCTRL_TXIRQ_BIT)

/* RX Buffer Descriptor */
struct grspw_rxbd {
    volatile unsigned int ctrl;
    volatile unsigned int addr;
};

/* TX Buffer Descriptor */
struct grspw_txbd {
    volatile unsigned int ctrl;
    volatile unsigned int haddr;
    volatile unsigned int dlen;
    volatile unsigned int daddr;
};

/* GRSPW - DMA RXBD Ctrl */
#define GRSPW_RXBD_LEN_BIT 0
#define GRSPW_RXBD_LEN  (0x1ffffff<<GRSPW_RXBD_LEN_BIT)
#define GRSPW_RXBD_EN   (1<<25)
#define GRSPW_RXBD_WR   (1<<26)
#define GRSPW_RXBD_IE   (1<<27)
#define GRSPW_RXBD_EP   (1<<28)
#define GRSPW_RXBD_HC   (1<<29)
#define GRSPW_RXBD_DC   (1<<30)
#define GRSPW_RXBD_TR   (1<<31)

#define GRSPW_TXBD_HLEN (0xff<<0)
#define GRSPW_TXBD_NCL  (0xf<<8)
#define GRSPW_TXBD_EN   (1<<12)
#define GRSPW_TXBD_WR   (1<<13)
#define GRSPW_TXBD_IE   (1<<14)
#define GRSPW_TXBD_LE   (1<<15)
#define GRSPW_TXBD_HC   (1<<16)
#define GRSPW_TXBD_DC   (1<<17)

#define GRSPW_DMAADR_MASK_BIT   8
#define GRSPW_DMAADR_ADDR   (0xff<<GRSPW_DMAADR_ADDR_BIT)
#define GRSPW_DMAADR_MASK   (0xff<<GRSPW_DMAADR_MASK_BIT)


/* GRSPW Error Condition */
#define GRSPW_STAT_ERROR    (GRSPW_STS_EE | GRSPW_STS_IA | GRSPW_STS_WE | GRSPW_STS_PE | GRSPW_STS_DE | GRSPW_STS_ER | GRSPW_STS_CE)
#define GRSPW_DMA_STATUS_ERROR  (GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA)
/* GRSPW Link configuration options */
#define GRSPW_LINK_CFG      (GRSPW_CTRL_LI | GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS)
#define GRSPW_LINKSTATE(status) ((status & GRSPW_CTRL_LS) >> GRSPW_CTRL_LS_BIT)

/* Software Defaults */
#define DEFAULT_RXMAX 1024  /* 1 KBytes Max RX Packet Size */

/* GRSPW Constants */
#define GRSPW_TXBD_NR 64    /* Maximum number of TX Descriptors */
#define GRSPW_RXBD_NR 128   /* Maximum number of RX Descriptors */
#define GRSPW_TXBD_SIZE 16  /* Size in bytes of one TX descriptor */
#define GRSPW_RXBD_SIZE 8   /* Size in bytes of one RX descriptor */
#define BDTAB_SIZE 0x400    /* BD Table Size (RX or TX) */
#define BDTAB_ALIGN 0x400   /* BD Table Alignment Requirement */

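/* Note (added for clarity): both descriptor tables fill exactly one
 * BDTAB_SIZE block: 128 RX descriptors * 8 bytes = 64 TX descriptors *
 * 16 bytes = 1024 (0x400) bytes.
 */
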
/* Memory and HW Registers Access routines. All 32-bit access routines */
#define BD_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
/*#define BD_READ(addr) (*(volatile unsigned int *)(addr))*/
#define BD_READ(addr) grlib_read_uncached32((unsigned long)(addr))
#define REG_WRITE(addr, val) (*(volatile unsigned int *)(addr) = (unsigned int)(val))
#define REG_READ(addr) (*(volatile unsigned int *)(addr))
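
/* Note (added for clarity): BD_READ goes through grlib_read_uncached32()
 * so that descriptor words updated by the DMA engine are fetched from
 * memory rather than from a possibly stale data-cache line.
 */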

struct grspw_ring {
    struct grspw_ring *next;    /* Next Descriptor */
    union {
        struct grspw_txbd *tx;  /* Descriptor Address */
        struct grspw_rxbd *rx;  /* Descriptor Address */
    } bd;
    struct grspw_pkt *pkt;      /* Associated packet description. NULL if none */
};

/* An entry in the TX descriptor Ring */
struct grspw_txring {
    struct grspw_txring *next;  /* Next Descriptor */
    struct grspw_txbd *bd;      /* Descriptor Address */
    struct grspw_pkt *pkt;      /* Associated packet description. NULL if none */
};

/* An entry in the RX descriptor Ring */
struct grspw_rxring {
    struct grspw_rxring *next;  /* Next Descriptor */
    struct grspw_rxbd *bd;      /* Descriptor Address */
    struct grspw_pkt *pkt;      /* Associated packet description. NULL if none */
};


struct grspw_dma_priv {
    struct grspw_priv *core;    /* GRSPW Core */
    struct grspw_dma_regs *regs;    /* DMA Channel Registers */
    int index;          /* DMA Channel Index @ GRSPW core */
    int open;           /* DMA Channel opened by user */
    int started;            /* DMA Channel activity (start|stop) */
    rtems_id sem_rxdma;     /* DMA Channel RX Semaphore */
    rtems_id sem_txdma;     /* DMA Channel TX Semaphore */
    struct grspw_dma_stats stats;   /* DMA Channel Statistics */
    struct grspw_dma_config cfg;    /* DMA Channel Configuration */

    /*** RX ***/

    /* RX Descriptor Ring */
    struct grspw_rxbd *rx_bds;      /* Descriptor Address */
    struct grspw_rxbd *rx_bds_hwa;      /* Descriptor HW Address */
    struct grspw_rxring *rx_ring_base;
    struct grspw_rxring *rx_ring_head;  /* Next descriptor to enable */
    struct grspw_rxring *rx_ring_tail;  /* Oldest enabled Descriptor */
    int rx_irq_en_cnt_curr;
    struct {
        int waiting;
        int ready_cnt;
        int op;
        int recv_cnt;
        rtems_id sem_wait;      /* RX Semaphore used to implement RX blocking */
    } rx_wait;

    /* Queue of Packets READY to be scheduled */
    struct grspw_list ready;
    int ready_cnt;

    /* Scheduled RX Packets Queue */
    struct grspw_list rx_sched;
    int rx_sched_cnt;

    /* Queue of Packets that have been RECEIVED */
    struct grspw_list recv;
    int recv_cnt;


    /*** TX ***/

    /* TX Descriptor Ring */
    struct grspw_txbd *tx_bds;      /* Descriptor Address */
    struct grspw_txbd *tx_bds_hwa;      /* Descriptor HW Address */
    struct grspw_txring *tx_ring_base;
    struct grspw_txring *tx_ring_head;
    struct grspw_txring *tx_ring_tail;
    int tx_irq_en_cnt_curr;
    struct {
        int waiting;
        int send_cnt;
        int op;
        int sent_cnt;
        rtems_id sem_wait;      /* TX Semaphore used to implement TX blocking */
    } tx_wait;

    /* Queue of Packets ready to be scheduled for transmission */
    struct grspw_list send;
    int send_cnt;

    /* Scheduled TX Packets Queue */
    struct grspw_list tx_sched;
    int tx_sched_cnt;

    /* Queue of Packets that have been SENT */
    struct grspw_list sent;
    int sent_cnt;
};

struct grspw_priv {
    char devname[8];        /* Device name "grspw%d" */
    struct drvmgr_dev *dev;     /* Device */
    struct grspw_regs *regs;    /* Virtual Address of APB Registers */
    int irq;            /* AMBA IRQ number of core */
    int index;          /* Index in order it was probed */
    int core_index;         /* Core Bus Index */
    int open;           /* If Device is already opened (=1) or not (=0) */
    void *data;         /* User private Data for this device instance, set by grspw_initialize_user */

    /* Features supported by Hardware */
    struct grspw_hw_sup hwsup;

    /* Pointer to an array of Maximally 4 DMA Channels */
    struct grspw_dma_priv *dma;

    /* Spin-lock ISR protection */
    SPIN_DECLARE(devlock);

    /* Descriptor Memory Area for TX & RX and all DMA channels */
    unsigned int bd_mem;
    unsigned int bd_mem_alloced;

    /*** Time Code Handling ***/
    void (*tcisr)(void *data, int timecode);
    void *tcisr_arg;

    /*** Interrupt-code Handling ***/
    spwpkt_ic_isr_t icisr;
    void *icisr_arg;

    /* Bit mask representing events which shall cause link disable. */
    unsigned int dis_link_on_err;

    /* Bit mask for link status bits to clear by ISR */
    unsigned int stscfg;

    /*** Message Queue Handling ***/
    struct grspw_work_config wc;

    /* "Core Global" Statistics gathered, not dependent on DMA channel */
    struct grspw_core_stats stats;
};

int grspw_initialized = 0;
int grspw_count = 0;
rtems_id grspw_sem;
static struct grspw_priv *priv_tab[GRSPW_MAX];

/* callback to upper layer when devices are discovered/removed */
void *(*grspw_dev_add)(int) = NULL;
void (*grspw_dev_del)(int,void*) = NULL;

/* Defaults to do nothing - user can override this function.
 * Called from work-task.
 */
void __attribute__((weak)) grspw_work_event(
    enum grspw_worktask_ev ev,
    unsigned int msg)
{

}
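
/* Example (illustration only): an application can provide its own strong
 * definition of the weak symbol above to observe work-task events;
 * my_app_log() is a hypothetical application function:
 *
 *   void grspw_work_event(enum grspw_worktask_ev ev, unsigned int msg)
 *   {
 *       my_app_log("grspw work event %d, msg 0x%x\n", ev, msg);
 *   }
 */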

/* USER OVERRIDABLE - The work task priority. Set to -1 to disable creating
 * the work-task and work-queue to save space.
 */
int grspw_work_task_priority __attribute__((weak)) = 100;
rtems_id grspw_work_task;
static struct grspw_work_config grspw_wc_def;

STATIC void grspw_hw_stop(struct grspw_priv *priv);
STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma);
STATIC void grspw_dma_reset(struct grspw_dma_priv *dma);
STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma);
STATIC void grspw_isr(void *data);

void *grspw_open(int dev_no)
{
    struct grspw_priv *priv;
    unsigned int bdtabsize, hwa;
    int i;
    union drvmgr_key_value *value;

    if (grspw_initialized != 1 || (dev_no >= grspw_count))
        return NULL;

    priv = priv_tab[dev_no];

    /* Take GRSPW lock - Wait until we get semaphore */
    if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
        != RTEMS_SUCCESSFUL)
        return NULL;

    if (priv->open) {
        priv = NULL;
        goto out;
    }

    /* Initialize Spin-lock for GRSPW Device. This is to protect
     * CTRL and DMACTRL registers from ISR.
     */
    SPIN_INIT(&priv->devlock, priv->devname);

    priv->tcisr = NULL;
    priv->tcisr_arg = NULL;
    priv->icisr = NULL;
    priv->icisr_arg = NULL;
    priv->stscfg = LINKSTS_MASK;

    /* Default to the common work queue and message queue; if they were not
     * created during initialization this functionality is disabled.
     */
    grspw_work_cfg(priv, &grspw_wc_def);

    grspw_stats_clr(priv);

    /* Allocate TX & RX Descriptor memory area for all DMA
     * channels. Max-size descriptor area is allocated (or user assigned):
     *  - 128 RX descriptors per DMA Channel
     *  - 64 TX descriptors per DMA Channel
     * Specified address must be in CPU RAM.
     */
    bdtabsize = 2 * BDTAB_SIZE * priv->hwsup.ndma_chans;
    value = drvmgr_dev_key_get(priv->dev, "bdDmaArea", DRVMGR_KT_INT);
    if (value) {
        priv->bd_mem = value->i;
        priv->bd_mem_alloced = 0;
        if (priv->bd_mem & (BDTAB_ALIGN-1)) {
            GRSPW_DBG("GRSPW[%d]: user-def DMA-area not aligned",
                      priv->index);
            priv = NULL;
            goto out;
        }
    } else {
        priv->bd_mem_alloced = (unsigned int)grlib_malloc(bdtabsize + BDTAB_ALIGN - 1);
        if (priv->bd_mem_alloced == 0) {
            priv = NULL;
            goto out;
        }
        /* Align memory */
        priv->bd_mem = (priv->bd_mem_alloced + (BDTAB_ALIGN - 1)) &
                       ~(BDTAB_ALIGN-1);
    }

    /* Translate into DMA address that HW can use to access DMA
     * descriptors
     */
    drvmgr_translate_check(
        priv->dev,
        CPUMEM_TO_DMA,
        (void *)priv->bd_mem,
        (void **)&hwa,
        bdtabsize);

    GRSPW_DBG("GRSPW%d DMA descriptor table setup: (alloced:%p, bd_mem:%p, size: %d)\n",
        priv->index, priv->bd_mem_alloced, priv->bd_mem, bdtabsize + BDTAB_ALIGN - 1);
    for (i=0; i<priv->hwsup.ndma_chans; i++) {
        /* Do DMA Channel Init, other variables etc. are inited
         * when respective DMA channel is opened.
         *
         * index & core are initialized by probe function.
         */
        priv->dma[i].open = 0;
        priv->dma[i].rx_bds = (struct grspw_rxbd *)
            (priv->bd_mem + i*BDTAB_SIZE*2);
        priv->dma[i].rx_bds_hwa = (struct grspw_rxbd *)
            (hwa + BDTAB_SIZE*(2*i));
        priv->dma[i].tx_bds = (struct grspw_txbd *)
            (priv->bd_mem + BDTAB_SIZE*(2*i+1));
        priv->dma[i].tx_bds_hwa = (struct grspw_txbd *)
            (hwa + BDTAB_SIZE*(2*i+1));
        GRSPW_DBG("  DMA[%i]: RX %p - %p (%p - %p)   TX %p - %p (%p - %p)\n",
            i,
            priv->dma[i].rx_bds, (void *)priv->dma[i].rx_bds + BDTAB_SIZE - 1,
            priv->dma[i].rx_bds_hwa, (void *)priv->dma[i].rx_bds_hwa + BDTAB_SIZE - 1,
            priv->dma[i].tx_bds, (void *)priv->dma[i].tx_bds + BDTAB_SIZE - 1,
            priv->dma[i].tx_bds_hwa, (void *)priv->dma[i].tx_bds_hwa + BDTAB_SIZE - 1);
    }

    /* Basic initialization of hardware, clear some registers but
     * keep Link/RMAP/Node-Address registers intact.
     */
    grspw_hw_stop(priv);

    /* Register Interrupt handler and enable IRQ at IRQ ctrl */
    drvmgr_interrupt_register(priv->dev, 0, priv->devname, grspw_isr, priv);

    /* Take the device */
    priv->open = 1;
out:
    rtems_semaphore_release(grspw_sem);
    return priv;
}

int grspw_close(void *d)
{
    struct grspw_priv *priv = d;
    int i;

    /* Take GRSPW lock - Wait until we get semaphore */
    if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
        != RTEMS_SUCCESSFUL)
        return -1;

    /* Check that user has stopped and closed all DMA channels
     * appropriately. At this point the Hardware shall not be doing DMA
     * or generating Interrupts. We want HW in a "startup-state".
     */
    for (i=0; i<priv->hwsup.ndma_chans; i++) {
        if (priv->dma[i].open) {
            rtems_semaphore_release(grspw_sem);
            return 1;
        }
    }
    grspw_hw_stop(priv);

    /* Uninstall Interrupt handler */
    drvmgr_interrupt_unregister(priv->dev, 0, grspw_isr, priv);

    /* Free descriptor table memory if allocated using malloc() */
    if (priv->bd_mem_alloced) {
        free((void *)priv->bd_mem_alloced);
        priv->bd_mem_alloced = 0;
    }

    /* Mark not open */
    priv->open = 0;
    rtems_semaphore_release(grspw_sem);
    return 0;
}

void grspw_hw_support(void *d, struct grspw_hw_sup *hw)
{
    struct grspw_priv *priv = d;

    *hw = priv->hwsup;
}

void grspw_addr_ctrl(void *d, struct grspw_addr_config *cfg)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs;
    unsigned int ctrl, nodeaddr;
    SPIN_IRQFLAGS(irqflags);
    int i;

    if (!priv || !cfg)
        return;

    regs = priv->regs;

    SPIN_LOCK_IRQ(&priv->devlock, irqflags);

    if (cfg->promiscuous != -1) {
        /* Set Configuration */
        ctrl = REG_READ(&regs->ctrl);
        if (cfg->promiscuous)
            ctrl |= GRSPW_CTRL_PM;
        else
            ctrl &= ~GRSPW_CTRL_PM;
        REG_WRITE(&regs->ctrl, ctrl);
        REG_WRITE(&regs->nodeaddr, (cfg->def_mask<<8) | cfg->def_addr);

        for (i=0; i<priv->hwsup.ndma_chans; i++) {
            ctrl = REG_READ(&regs->dma[i].ctrl);
            ctrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
            if (cfg->dma_nacfg[i].node_en) {
                ctrl |= GRSPW_DMACTRL_EN;
                REG_WRITE(&regs->dma[i].addr,
                          (cfg->dma_nacfg[i].node_addr & 0xff) |
                          ((cfg->dma_nacfg[i].node_mask & 0xff)<<8));
            } else {
                ctrl &= ~GRSPW_DMACTRL_EN;
            }
            REG_WRITE(&regs->dma[i].ctrl, ctrl);
        }
    }

    /* Read Current Configuration */
    cfg->promiscuous = REG_READ(&regs->ctrl) & GRSPW_CTRL_PM;
    nodeaddr = REG_READ(&regs->nodeaddr);
    cfg->def_addr = (nodeaddr & GRSPW_DEF_ADDR) >> GRSPW_DEF_ADDR_BIT;
    cfg->def_mask = (nodeaddr & GRSPW_DEF_MASK) >> GRSPW_DEF_MASK_BIT;
    for (i=0; i<priv->hwsup.ndma_chans; i++) {
        cfg->dma_nacfg[i].node_en = REG_READ(&regs->dma[i].ctrl) &
                        GRSPW_DMACTRL_EN;
        ctrl = REG_READ(&regs->dma[i].addr);
        cfg->dma_nacfg[i].node_addr = (ctrl & GRSPW_DMAADR_ADDR) >>
                        GRSPW_DMAADR_ADDR_BIT;
        cfg->dma_nacfg[i].node_mask = (ctrl & GRSPW_DMAADR_MASK) >>
                        GRSPW_DMAADR_MASK_BIT;
    }
    SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
    for (; i<4; i++) {
        cfg->dma_nacfg[i].node_en = 0;
        cfg->dma_nacfg[i].node_addr = 0;
        cfg->dma_nacfg[i].node_mask = 0;
    }
}
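
/* Example (illustration only): read back the current address configuration
 * without modifying it, by presetting promiscuous to -1:
 *
 *   struct grspw_addr_config cfg;
 *   cfg.promiscuous = -1;       // read-only query
 *   grspw_addr_ctrl(dev, &cfg); // cfg now holds node address/mask setup
 */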

/* Return Current DMA CTRL/Status Register */
unsigned int grspw_dma_ctrlsts(void *c)
{
    struct grspw_dma_priv *dma = c;

    return REG_READ(&dma->regs->ctrl);
}

/* Return Current Status Register */
unsigned int grspw_link_status(void *d)
{
    struct grspw_priv *priv = d;

    return REG_READ(&priv->regs->status);
}

/* Clear Status Register bits */
void grspw_link_status_clr(void *d, unsigned int mask)
{
    struct grspw_priv *priv = d;

    REG_WRITE(&priv->regs->status, mask);
}

/* Return Current Link State */
spw_link_state_t grspw_link_state(void *d)
{
    struct grspw_priv *priv = d;
    unsigned int status = REG_READ(&priv->regs->status);

    return (status & GRSPW_STS_LS) >> GRSPW_STS_LS_BIT;
}

/* Enable Global IRQ only if some irq source is set */
static inline int grspw_is_irqsource_set(unsigned int ctrl, unsigned int icctrl)
{
    return (ctrl & GRSPW_CTRL_IRQSRC_MASK) ||
        (icctrl & GRSPW_ICCTRL_IRQSRC_MASK);
}


/* options and clkdiv [in/out]: set to -1 to only read current config */
void grspw_link_ctrl(void *d, int *options, int *stscfg, int *clkdiv)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;
    unsigned int ctrl;
    SPIN_IRQFLAGS(irqflags);

    /* Write? */
    if (clkdiv) {
        if (*clkdiv != -1)
            REG_WRITE(&regs->clkdiv, *clkdiv & GRSPW_CLKDIV_MASK);
        *clkdiv = REG_READ(&regs->clkdiv) & GRSPW_CLKDIV_MASK;
    }
    if (options) {
        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        ctrl = REG_READ(&regs->ctrl);
        if (*options != -1) {
            ctrl = (ctrl & ~GRSPW_LINK_CFG) |
                (*options & GRSPW_LINK_CFG);

            /* Enable Global IRQ only if some irq source is set */
            if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
                ctrl |= GRSPW_CTRL_IE;
            else
                ctrl &= ~GRSPW_CTRL_IE;

            REG_WRITE(&regs->ctrl, ctrl);
            /* Store the link disable events for use in
             * ISR. The LINKOPTS_DIS_ON_* options are actually the
             * corresponding bits in the status register, shifted
             * by 16.
             */
            priv->dis_link_on_err = *options &
                (LINKOPTS_MASK_DIS_ON | LINKOPTS_DIS_ONERR);
        }
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        *options = (ctrl & GRSPW_LINK_CFG) | priv->dis_link_on_err;
    }
    if (stscfg) {
        if (*stscfg != -1) {
            priv->stscfg = *stscfg & LINKSTS_MASK;
        }
        *stscfg = priv->stscfg;
    }
}
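
/* Example (illustration only): query the link configuration, status-clear
 * mask and clock divisor without writing anything:
 *
 *   int opts = -1, sts = -1, clkdiv = -1;
 *   grspw_link_ctrl(dev, &opts, &sts, &clkdiv);
 */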

/* Generate Tick-In (increment Time Counter, Send Time Code) */
void grspw_tc_tx(void *d)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;
    SPIN_IRQFLAGS(irqflags);

    SPIN_LOCK_IRQ(&priv->devlock, irqflags);
    REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_TI);
    SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
}

void grspw_tc_ctrl(void *d, int *options)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;
    unsigned int ctrl;
    SPIN_IRQFLAGS(irqflags);

    if (options == NULL)
        return;

    /* Write? */
    if (*options != -1) {
        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        ctrl = REG_READ(&regs->ctrl);
        ctrl &= ~(GRSPW_CTRL_TR|GRSPW_CTRL_TT|GRSPW_CTRL_TQ);
        ctrl |= (*options & 0xd) << GRSPW_CTRL_TQ_BIT;

        /* Enable Global IRQ only if some irq source is set */
        if (grspw_is_irqsource_set(ctrl, REG_READ(&regs->icctrl)))
            ctrl |= GRSPW_CTRL_IE;
        else
            ctrl &= ~GRSPW_CTRL_IE;

        REG_WRITE(&regs->ctrl, ctrl);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
    } else
        ctrl = REG_READ(&regs->ctrl);
    *options = (ctrl >> GRSPW_CTRL_TQ_BIT) & 0xd;
}

/* Assign ISR Function to TimeCode RX IRQ */
void grspw_tc_isr(void *d, void (*tcisr)(void *data, int tc), void *data)
{
    struct grspw_priv *priv = d;

    priv->tcisr_arg = data;
    priv->tcisr = tcisr;
}

/* Read/Write TCTRL and TIMECNT. Write if not -1, always read current value
 * TCTRL   = bits 7 and 6
 * TIMECNT = bits 5 to 0
 */
void grspw_tc_time(void *d, int *time)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;

    if (time == NULL)
        return;
    if (*time != -1)
        REG_WRITE(&regs->time, *time & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL));
    *time = REG_READ(&regs->time) & (GRSPW_TIME_TCNT | GRSPW_TIME_CTRL);
}
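
/* Example (illustration only): read the current time register and split it
 * into its fields using the masks defined above:
 *
 *   int t = -1;
 *   grspw_tc_time(dev, &t);
 *   int tctrl   = (t & GRSPW_TIME_CTRL) >> GRSPW_TIME_CTRL_BIT;
 *   int timecnt = (t & GRSPW_TIME_TCNT) >> GRSPW_TIME_CNT_BIT;
 */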

/* Generate Tick-In for the given Interrupt-code and check for generation
 * error.
 *
 * Returns zero on success and non-zero on failure
 */
int grspw_ic_tickin(void *d, int ic)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;
    SPIN_IRQFLAGS(irqflags);
    unsigned int icctrl, mask;

    /* Prepare before turning off IRQ */
    mask = 0x3f << GRSPW_ICCTRL_TXIRQ_BIT;
    ic = ((ic << GRSPW_ICCTRL_TXIRQ_BIT) & mask) |
         GRSPW_ICCTRL_II | GRSPW_ICCTRL_ID;

    SPIN_LOCK_IRQ(&priv->devlock, irqflags);
    icctrl = REG_READ(&regs->icctrl);
    icctrl &= ~mask;
    icctrl |= ic;
    REG_WRITE(&regs->icctrl, icctrl); /* Generate SpW Interrupt Tick-In */
    /* the ID bit is valid after two clocks, so we do not need to wait here */
    icctrl = REG_READ(&regs->icctrl); /* Check SpW-Int generation error */
    SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);

    return icctrl & GRSPW_ICCTRL_ID;
}
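
/* Example (illustration only): send interrupt-code 5 and check the result;
 * a non-zero return indicates a generation error:
 *
 *   if (grspw_ic_tickin(dev, 5) != 0)
 *       printk("SpW interrupt-code generation error\n");
 */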

#define ICOPTS_CTRL_MASK ICOPTS_EN_FLAGFILTER
#define ICOPTS_ICCTRL_MASK                      \
    (ICOPTS_INTNUM | ICOPTS_EN_SPWIRQ_ON_EE  | ICOPTS_EN_SPWIRQ_ON_IA | \
     ICOPTS_EN_PRIO | ICOPTS_EN_TIMEOUTIRQ | ICOPTS_EN_ACKIRQ | \
     ICOPTS_EN_TICKOUTIRQ | ICOPTS_EN_RX | ICOPTS_EN_TX | \
     ICOPTS_BASEIRQ)

/* Control Interrupt-code settings of core
 * Write if not pointing to -1, always read current value
 *
 * TODO: A lot of code duplication with grspw_tc_ctrl
 */
void grspw_ic_ctrl(void *d, unsigned int *options)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;
    unsigned int ctrl;
    unsigned int icctrl;
    SPIN_IRQFLAGS(irqflags);

    if (options == NULL)
        return;

    if (*options != -1) {
        SPIN_LOCK_IRQ(&priv->devlock, irqflags);

        ctrl = REG_READ(&regs->ctrl);
        ctrl &= ~GRSPW_CTRL_TF; /* Depends on one to one relation between
                     * irqopts bits and ctrl bits */
        ctrl |= (*options & ICOPTS_CTRL_MASK) <<
            (GRSPW_CTRL_TF_BIT - 0);

        icctrl = REG_READ(&regs->icctrl);
        icctrl &= ~ICOPTS_ICCTRL_MASK; /* Depends on one to one relation between
                        * irqopts bits and icctrl bits */
        icctrl |= *options & ICOPTS_ICCTRL_MASK;

        /* Enable Global IRQ only if some irq source is set */
        if (grspw_is_irqsource_set(ctrl, icctrl))
            ctrl |= GRSPW_CTRL_IE;
        else
            ctrl &= ~GRSPW_CTRL_IE;

        REG_WRITE(&regs->ctrl, ctrl);
        REG_WRITE(&regs->icctrl, icctrl);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
    }
    *options = ((REG_READ(&regs->ctrl) & ICOPTS_CTRL_MASK) |
            (REG_READ(&regs->icctrl) & ICOPTS_ICCTRL_MASK));
}

void grspw_ic_config(void *d, int rw, struct spwpkt_ic_config *cfg)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;

    if (!cfg)
        return;

    if (rw & 1) {
        REG_WRITE(&regs->ictickomask, cfg->tomask);
        REG_WRITE(&regs->icaamask, cfg->aamask);
        REG_WRITE(&regs->icrlpresc, cfg->scaler);
        REG_WRITE(&regs->icrlisr, cfg->isr_reload);
        REG_WRITE(&regs->icrlintack, cfg->ack_reload);
    }
    if (rw & 2) {
        cfg->tomask = REG_READ(&regs->ictickomask);
        cfg->aamask = REG_READ(&regs->icaamask);
        cfg->scaler = REG_READ(&regs->icrlpresc);
        cfg->isr_reload = REG_READ(&regs->icrlisr);
        cfg->ack_reload = REG_READ(&regs->icrlintack);
    }
}

/* Read or Write Interrupt-code status registers */
void grspw_ic_sts(void *d, unsigned int *rxirq, unsigned int *rxack, unsigned int *intto)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;

    /* No locking needed since the status bits are clear-on-write */

    if (rxirq) {
        if (*rxirq != 0)
            REG_WRITE(&regs->icrx, *rxirq);
        else
            *rxirq = REG_READ(&regs->icrx);
    }

    if (rxack) {
        if (*rxack != 0)
            REG_WRITE(&regs->icack, *rxack);
        else
            *rxack = REG_READ(&regs->icack);
    }

    if (intto) {
        if (*intto != 0)
            REG_WRITE(&regs->ictimeout, *intto);
        else
            *intto = REG_READ(&regs->ictimeout);
    }
}
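
/* Example (illustration only): a zero input reads a status register, a
 * non-zero input clears the given bits (clear-on-write):
 *
 *   unsigned int rxirq = 0;
 *   grspw_ic_sts(dev, &rxirq, NULL, NULL); // read RX-IRQ status
 *   if (rxirq != 0)
 *       grspw_ic_sts(dev, &rxirq, NULL, NULL); // clear the bits just read
 */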

/* Assign handler function to Interrupt-code tick out IRQ */
void grspw_ic_isr(void *d, spwpkt_ic_isr_t handler, void *data)
{
    struct grspw_priv *priv = d;

    priv->icisr_arg = data;
    priv->icisr = handler;
}

/* Set (not -1) and/or read RMAP options. */
int grspw_rmap_ctrl(void *d, int *options, int *dstkey)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;
    unsigned int ctrl;
    SPIN_IRQFLAGS(irqflags);

    if (dstkey) {
        if (*dstkey != -1)
            REG_WRITE(&regs->destkey, *dstkey & GRSPW_DK_DESTKEY);
        *dstkey = REG_READ(&regs->destkey) & GRSPW_DK_DESTKEY;
    }
    if (options) {
        if (*options != -1) {
            if ((*options & RMAPOPTS_EN_RMAP) && !priv->hwsup.rmap)
                return -1;

            SPIN_LOCK_IRQ(&priv->devlock, irqflags);
            ctrl = REG_READ(&regs->ctrl);
            ctrl &= ~(GRSPW_CTRL_RE|GRSPW_CTRL_RD);
            ctrl |= (*options & 0x3) << GRSPW_CTRL_RE_BIT;
            REG_WRITE(&regs->ctrl, ctrl);
            SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
        }
        *options = (REG_READ(&regs->ctrl) >> GRSPW_CTRL_RE_BIT) & 0x3;
    }

    return 0;
}
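
/* Example (illustration only): try to enable the RMAP target while leaving
 * the destination key unchanged; a -1 return means RMAP is unsupported:
 *
 *   int opts = RMAPOPTS_EN_RMAP;
 *   if (grspw_rmap_ctrl(dev, &opts, NULL) != 0)
 *       printk("RMAP not supported by this core\n");
 */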

void grspw_rmap_support(void *d, char *rmap, char *rmap_crc)
{
    struct grspw_priv *priv = d;

    if (rmap)
        *rmap = priv->hwsup.rmap;
    if (rmap_crc)
        *rmap_crc = priv->hwsup.rmap_crc;
}

/* Select port:
 * -1     = only return the currently selected port
 * 0      = Port 0
 * 1      = Port 1
 * Others = both Port 0 and Port 1
 */
int grspw_port_ctrl(void *d, int *port)
{
    struct grspw_priv *priv = d;
    struct grspw_regs *regs = priv->regs;
    unsigned int ctrl;
    SPIN_IRQFLAGS(irqflags);

    if (port == NULL)
        return -1;

    if ((*port == 1) || (*port == 0)) {
        /* Select port user selected */
        if ((*port == 1) && (priv->hwsup.nports < 2))
            return -1; /* Changing to Port 1, but only one port available */
        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        ctrl = REG_READ(&regs->ctrl);
        ctrl &= ~(GRSPW_CTRL_NP | GRSPW_CTRL_PS);
        ctrl |= (*port & 1) << GRSPW_CTRL_PS_BIT;
        REG_WRITE(&regs->ctrl, ctrl);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
    } else if (*port > 1) {
        /* Select both ports */
        SPIN_LOCK_IRQ(&priv->devlock, irqflags);
        REG_WRITE(&regs->ctrl, REG_READ(&regs->ctrl) | GRSPW_CTRL_NP);
        SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
    }

    /* Get current settings */
    ctrl = REG_READ(&regs->ctrl);
    if (ctrl & GRSPW_CTRL_NP) {
        /* Any port, selected by hardware */
        if (priv->hwsup.nports > 1)
            *port = 3;
        else
            *port = 0; /* Port0 the only port available */
    } else {
        *port = (ctrl & GRSPW_CTRL_PS) >> GRSPW_CTRL_PS_BIT;
    }

    return 0;
}
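
/* Example (illustration only): query the current port selection without
 * changing it:
 *
 *   int port = -1;
 *   grspw_port_ctrl(dev, &port); // port is now 0, 1 or 3 (both ports)
 */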

/* Returns the number of ports available in hardware */
int grspw_port_count(void *d)
{
    struct grspw_priv *priv = d;

    return priv->hwsup.nports;
}

/* Current active port: 0 or 1 */
int grspw_port_active(void *d)
{
    struct grspw_priv *priv = d;
    unsigned int status;

    status = REG_READ(&priv->regs->status);

    return (status & GRSPW_STS_AP) >> GRSPW_STS_AP_BIT;
}

void grspw_stats_read(void *d, struct grspw_core_stats *sts)
{
    struct grspw_priv *priv = d;

    if (sts == NULL)
        return;
    memcpy(sts, &priv->stats, sizeof(priv->stats));
}

void grspw_stats_clr(void *d)
{
    struct grspw_priv *priv = d;

    /* Clear most of the statistics */
    memset(&priv->stats, 0, sizeof(priv->stats));
}

/*** DMA Interface ***/

/* Initialize the RX and TX Descriptor Ring, empty of packets */
STATIC void grspw_bdrings_init(struct grspw_dma_priv *dma)
{
    struct grspw_ring *r;
    int i;

    /* Empty BD rings */
    dma->rx_ring_head = dma->rx_ring_base;
    dma->rx_ring_tail = dma->rx_ring_base;
    dma->tx_ring_head = dma->tx_ring_base;
    dma->tx_ring_tail = dma->tx_ring_base;

    /* Init RX Descriptors */
    r = (struct grspw_ring *)dma->rx_ring_base;
    for (i=0; i<GRSPW_RXBD_NR; i++) {

        /* Init Ring Entry */
        r[i].next = &r[i+1];
        r[i].bd.rx = &dma->rx_bds[i];
        r[i].pkt = NULL;

        /* Init HW Descriptor */
        BD_WRITE(&r[i].bd.rx->ctrl, 0);
        BD_WRITE(&r[i].bd.rx->addr, 0);
    }
    r[GRSPW_RXBD_NR-1].next = &r[0];

    /* Init TX Descriptors */
    r = (struct grspw_ring *)dma->tx_ring_base;
    for (i=0; i<GRSPW_TXBD_NR; i++) {

        /* Init Ring Entry */
        r[i].next = &r[i+1];
        r[i].bd.tx = &dma->tx_bds[i];
        r[i].pkt = NULL;

        /* Init HW Descriptor */
        BD_WRITE(&r[i].bd.tx->ctrl, 0);
        BD_WRITE(&r[i].bd.tx->haddr, 0);
        BD_WRITE(&r[i].bd.tx->dlen, 0);
        BD_WRITE(&r[i].bd.tx->daddr, 0);
    }
    r[GRSPW_TXBD_NR-1].next = &r[0];
}

/* Try to populate the descriptor ring with as many READY unused packet
 * buffers as possible. The packets assigned to a descriptor are put at the
 * end of the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - READY List -> RX-SCHED List
 *  - Descriptors are initialized and enabled for reception
 */
STATIC int grspw_rx_schedule_ready(struct grspw_dma_priv *dma)
{
    int cnt;
    unsigned int ctrl, dmactrl;
    void *hwaddr;
    struct grspw_rxring *curr_bd;
    struct grspw_pkt *curr_pkt, *last_pkt;
    struct grspw_list lst;
    SPIN_IRQFLAGS(irqflags);

    /* Is Ready Q empty? */
    if (grspw_list_is_empty(&dma->ready))
        return 0;

    cnt = 0;
    lst.head = curr_pkt = dma->ready.head;
    curr_bd = dma->rx_ring_head;
    while (!curr_bd->pkt) {

        /* Assign Packet to descriptor */
        curr_bd->pkt = curr_pkt;

        /* Prepare descriptor address. */
        hwaddr = curr_pkt->data;
        if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
            drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                     hwaddr, &hwaddr);
            if (curr_pkt->data == hwaddr) /* translation needed? */
                curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
        }
        BD_WRITE(&curr_bd->bd->addr, hwaddr);

        ctrl = GRSPW_RXBD_EN;
        if (curr_bd->next == dma->rx_ring_base) {
            /* Wrap around (only needed when smaller descriptor
             * table)
             */
            ctrl |= GRSPW_RXBD_WR;
        }

        /* Is this Packet going to be an interrupt Packet? */
        if ((--dma->rx_irq_en_cnt_curr) <= 0) {
            if (dma->cfg.rx_irq_en_cnt == 0) {
                /* IRQ is disabled. Use a big number so the
                 * counter rarely reaches zero.
                 */
                dma->rx_irq_en_cnt_curr = 0x3fffffff;
            } else {
                dma->rx_irq_en_cnt_curr = dma->cfg.rx_irq_en_cnt;
                ctrl |= GRSPW_RXBD_IE;
            }
        }

        if (curr_pkt->flags & RXPKT_FLAG_IE)
            ctrl |= GRSPW_RXBD_IE;

        /* Enable descriptor */
        BD_WRITE(&curr_bd->bd->ctrl, ctrl);

        last_pkt = curr_pkt;
        curr_bd = curr_bd->next;
        cnt++;

        /* Get Next Packet from Ready Queue */
        if (curr_pkt == dma->ready.tail) {
            /* Handled all in ready queue. */
            curr_pkt = NULL;
            break;
        }
        curr_pkt = curr_pkt->next;
    }

    /* Have Packets been scheduled? */
    if (cnt > 0) {
        /* Prepare list for insertion/deletion */
        lst.tail = last_pkt;

        /* Remove scheduled packets from ready queue */
        grspw_list_remove_head_list(&dma->ready, &lst);
        dma->ready_cnt -= cnt;
        if (dma->stats.ready_cnt_min > dma->ready_cnt)
            dma->stats.ready_cnt_min = dma->ready_cnt;

        /* Insert scheduled packets into scheduled queue */
        grspw_list_append_list(&dma->rx_sched, &lst);
        dma->rx_sched_cnt += cnt;
        if (dma->stats.rx_sched_cnt_max < dma->rx_sched_cnt)
            dma->stats.rx_sched_cnt_max = dma->rx_sched_cnt;

        /* Update RX ring position */
        dma->rx_ring_head = curr_bd;

        /* Make hardware aware of the newly enabled descriptors.
         * We must protect from ISR which writes RI|TI
         */
        SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
        dmactrl = REG_READ(&dma->regs->ctrl);
        dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
        dmactrl |= GRSPW_DMACTRL_RE | GRSPW_DMACTRL_RD;
        REG_WRITE(&dma->regs->ctrl, dmactrl);
        SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
    }

    return cnt;
}

/* Scans the RX descriptor table for scheduled Packets that have been
 * received, and moves these Packets from the head of the scheduled queue
 * to the tail of the recv queue.
 *
 * Also, for all packets the status is updated.
 *
 *  - RX-SCHED List -> RECV List
 *
 * Return Value
 * Number of packets moved
 */
STATIC int grspw_rx_process_scheduled(struct grspw_dma_priv *dma)
{
    struct grspw_rxring *curr;
    struct grspw_pkt *last_pkt;
    int recv_pkt_cnt = 0;
    unsigned int ctrl;
    struct grspw_list lst;

    curr = dma->rx_ring_tail;

    /* Step into RX ring to find if packets have been scheduled for
     * reception.
     */
    if (!curr->pkt)
        return 0; /* No scheduled packets, thus no received, abort */

    /* There have been Packets scheduled ==> scheduled Packets may have been
     * received and need to be collected into the RECV List.
     *
     * A temporary list "lst" with all received packets is created.
     */
    lst.head = curr->pkt;

    /* Loop until the first enabled "unreceived" SpW Packet is found.
     * An unused descriptor is indicated by an unassigned pkt field.
     */
    while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_RXBD_EN)) {
        /* Handle one received Packet */

        /* Remember last handled Packet so that insertion/removal from
         * Packet lists go fast.
         */
        last_pkt = curr->pkt;

        /* Get Length of Packet in bytes, and reception options */
        last_pkt->dlen = (ctrl & GRSPW_RXBD_LEN) >> GRSPW_RXBD_LEN_BIT;

        /* Set flags to indicate error(s) and CRC information,
         * and Mark Received.
         */
        last_pkt->flags = (last_pkt->flags & ~RXPKT_FLAG_OUTPUT_MASK) |
                          ((ctrl >> 20) & RXPKT_FLAG_OUTPUT_MASK) |
                          RXPKT_FLAG_RX;

        /* Packet was Truncated? */
        if (ctrl & GRSPW_RXBD_TR)
            dma->stats.rx_err_trunk++;

        /* Error End-Of-Packet? */
        if (ctrl & GRSPW_RXBD_EP)
            dma->stats.rx_err_endpkt++;
        curr->pkt = NULL; /* Mark descriptor unused */

        /* Increment */
        curr = curr->next;
        recv_pkt_cnt++;
    }

    /* 1. Remove all handled packets from scheduled queue
     * 2. Put all handled packets into recv queue
     */
    if (recv_pkt_cnt > 0) {

        /* Update Stats, Number of Received Packets */
        dma->stats.rx_pkts += recv_pkt_cnt;

        /* Save RX ring position */
        dma->rx_ring_tail = curr;

        /* Prepare list for insertion/deletion */
        lst.tail = last_pkt;

        /* Remove received Packets from RX-SCHED queue */
        grspw_list_remove_head_list(&dma->rx_sched, &lst);
        dma->rx_sched_cnt -= recv_pkt_cnt;
        if (dma->stats.rx_sched_cnt_min > dma->rx_sched_cnt)
            dma->stats.rx_sched_cnt_min = dma->rx_sched_cnt;

        /* Insert received Packets into RECV queue */
        grspw_list_append_list(&dma->recv, &lst);
        dma->recv_cnt += recv_pkt_cnt;
        if (dma->stats.recv_cnt_max < dma->recv_cnt)
            dma->stats.recv_cnt_max = dma->recv_cnt;
    }

    return recv_pkt_cnt;
}

/* Try to populate the descriptor ring with as many SEND packets as
 * possible. The packets assigned to a descriptor are put at the end of
 * the scheduled list.
 *
 * The number of Packets scheduled is returned.
 *
 *  - SEND List -> TX-SCHED List
 *  - Descriptors are initialized and enabled for transmission
 */
STATIC int grspw_tx_schedule_send(struct grspw_dma_priv *dma)
{
    int cnt;
    unsigned int ctrl, dmactrl;
    void *hwaddr;
    struct grspw_txring *curr_bd;
    struct grspw_pkt *curr_pkt, *last_pkt;
    struct grspw_list lst;
    SPIN_IRQFLAGS(irqflags);

    /* Is Send Q empty? */
    if (grspw_list_is_empty(&dma->send))
        return 0;

    cnt = 0;
    lst.head = curr_pkt = dma->send.head;
    curr_bd = dma->tx_ring_head;
    while (!curr_bd->pkt) {

        /* Assign Packet to descriptor */
        curr_bd->pkt = curr_pkt;

        /* Set up header transmission */
        if (curr_pkt->hdr && curr_pkt->hlen) {
            hwaddr = curr_pkt->hdr;
            if (curr_pkt->flags & PKT_FLAG_TR_HDR) {
                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                         hwaddr, &hwaddr);
                /* translation needed? */
                if (curr_pkt->hdr == hwaddr)
                    curr_pkt->flags &= ~PKT_FLAG_TR_HDR;
            }
            BD_WRITE(&curr_bd->bd->haddr, hwaddr);
            ctrl = GRSPW_TXBD_EN |
                   (curr_pkt->hlen & GRSPW_TXBD_HLEN);
        } else {
            ctrl = GRSPW_TXBD_EN;
        }
        /* Enable IRQ generation and CRC options as specified
         * by user.
         */
        ctrl |= (curr_pkt->flags & TXPKT_FLAG_INPUT_MASK) << 8;

        if (curr_bd->next == dma->tx_ring_base) {
            /* Wrap around (only needed when smaller descriptor table) */
            ctrl |= GRSPW_TXBD_WR;
        }

        /* Is this Packet going to be an interrupt Packet? */
        if ((--dma->tx_irq_en_cnt_curr) <= 0) {
            if (dma->cfg.tx_irq_en_cnt == 0) {
                /* IRQ is disabled. Use a big number so the
                 * counter rarely reaches zero.
                 */
                dma->tx_irq_en_cnt_curr = 0x3fffffff;
            } else {
                dma->tx_irq_en_cnt_curr = dma->cfg.tx_irq_en_cnt;
                ctrl |= GRSPW_TXBD_IE;
            }
        }

        /* Prepare descriptor address. Parts of CTRL are written to
         * DLEN for debug only (CTRL is cleared by HW).
         */
        if (curr_pkt->data && curr_pkt->dlen) {
            hwaddr = curr_pkt->data;
            if (curr_pkt->flags & PKT_FLAG_TR_DATA) {
                drvmgr_translate(dma->core->dev, CPUMEM_TO_DMA,
                         hwaddr, &hwaddr);
                /* translation needed? */
                if (curr_pkt->data == hwaddr)
                    curr_pkt->flags &= ~PKT_FLAG_TR_DATA;
            }
            BD_WRITE(&curr_bd->bd->daddr, hwaddr);
            BD_WRITE(&curr_bd->bd->dlen, curr_pkt->dlen |
                                         ((ctrl & 0x3f000) << 12));
        } else {
            BD_WRITE(&curr_bd->bd->daddr, 0);
            BD_WRITE(&curr_bd->bd->dlen, ((ctrl & 0x3f000) << 12));
        }

        /* Enable descriptor */
        BD_WRITE(&curr_bd->bd->ctrl, ctrl);

        last_pkt = curr_pkt;
        curr_bd = curr_bd->next;
        cnt++;

1517         /* Get Next Packet from Ready Queue */
1518         if (curr_pkt == dma->send.tail) {
1519             /* Handled all in the SEND queue. */
1520             curr_pkt = NULL;
1521             break;
1522         }
1523         curr_pkt = curr_pkt->next;
1524     }
1525 
1526     /* Have Packets been scheduled? */
1527     if (cnt > 0) {
1528         /* Prepare list for insertion/deletion */
1529         lst.tail = last_pkt;
1530 
1531         /* Remove scheduled packets from the SEND queue */
1532         grspw_list_remove_head_list(&dma->send, &lst);
1533         dma->send_cnt -= cnt;
1534         if (dma->stats.send_cnt_min > dma->send_cnt)
1535             dma->stats.send_cnt_min = dma->send_cnt;
1536 
1537         /* Insert scheduled packets into scheduled queue */
1538         grspw_list_append_list(&dma->tx_sched, &lst);
1539         dma->tx_sched_cnt += cnt;
1540         if (dma->stats.tx_sched_cnt_max < dma->tx_sched_cnt)
1541             dma->stats.tx_sched_cnt_max = dma->tx_sched_cnt;
1542 
1543         /* Update TX ring position */
1544         dma->tx_ring_head = curr_bd;
1545 
1546         /* Make hardware aware of the newly enabled descriptors */
1547         SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1548         dmactrl = REG_READ(&dma->regs->ctrl);
1549         dmactrl &= ~(GRSPW_DMACTRL_PS|GRSPW_DMACTRL_PR|GRSPW_DMA_STATUS_ERROR);
1550         dmactrl |= GRSPW_DMACTRL_TE;
1551         REG_WRITE(&dma->regs->ctrl, dmactrl);
1552         SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1553     }
1554     return cnt;
1555 }
1556 
1557 /* Scans the TX descriptor table for transmitted packets and moves these
1558  * packets from the head of the scheduled queue to the tail of the sent queue.
1559  *
1560  * The status of every moved packet is also updated.
1561  *
1562  *  - SCHED List -> SENT List
1563  *
1564  * Return Value
1565  * Number of packets moved
1566  */
1567 STATIC int grspw_tx_process_scheduled(struct grspw_dma_priv *dma)
1568 {
1569     struct grspw_txring *curr;
1570     struct grspw_pkt *last_pkt;
1571     int sent_pkt_cnt = 0;
1572     unsigned int ctrl;
1573     struct grspw_list lst;
1574 
1575     curr = dma->tx_ring_tail;
1576 
1577     /* Step into TX ring to find if packets have been scheduled for 
1578      * transmission.
1579      */
1580     if (!curr->pkt)
1581         return 0; /* No scheduled packets, thus no sent, abort */
1582 
1583     /* Packets have been scheduled ==> scheduled packets may have been
1584      * transmitted and need to be collected into the SENT List.
1585      *
1586      * A temporary list "lst" with all sent packets is created.
1587      */
1588     lst.head = curr->pkt;
1589 
1590     /* Loop until first enabled "un-transmitted" SpW Packet is found.
1591      * An unused descriptor is indicated by an unassigned pkt field.
1592      */
1593     while (curr->pkt && !((ctrl=BD_READ(&curr->bd->ctrl)) & GRSPW_TXBD_EN)) {
1594         /* Handle one sent Packet */
1595 
1596         /* Remember last handled Packet so that insertion/removal from
1597          * packet lists go fast.
1598          */
1599         last_pkt = curr->pkt;
1600 
1601         /* Set flags to indicate error(s) and Mark Sent.
1602          */
1603         last_pkt->flags = (last_pkt->flags & ~TXPKT_FLAG_OUTPUT_MASK) |
1604                     (ctrl & TXPKT_FLAG_LINKERR) |
1605                     TXPKT_FLAG_TX;
1606 
1607         /* Sent packet experienced link error? */
1608         if (ctrl & GRSPW_TXBD_LE)
1609             dma->stats.tx_err_link++;
1610 
1611         curr->pkt = NULL; /* Mark descriptor unused */
1612 
1613         /* Increment */
1614         curr = curr->next;
1615         sent_pkt_cnt++;
1616     }
1617 
1618     /* 1. Remove all handled packets from TX-SCHED queue
1619      * 2. Put all handled packets into SENT queue
1620      */
1621     if (sent_pkt_cnt > 0) {
1622         /* Update Stats, Number of Transmitted Packets */
1623         dma->stats.tx_pkts += sent_pkt_cnt;
1624 
1625         /* Save TX ring position */
1626         dma->tx_ring_tail = curr;
1627 
1628         /* Prepare list for insertion/deletion */
1629         lst.tail = last_pkt;
1630 
1631         /* Remove sent packets from TX-SCHED queue */
1632         grspw_list_remove_head_list(&dma->tx_sched, &lst);
1633         dma->tx_sched_cnt -= sent_pkt_cnt;
1634         if (dma->stats.tx_sched_cnt_min > dma->tx_sched_cnt)
1635             dma->stats.tx_sched_cnt_min = dma->tx_sched_cnt;
1636 
1637         /* Insert sent packets into SENT queue */
1638         grspw_list_append_list(&dma->sent, &lst);
1639         dma->sent_cnt += sent_pkt_cnt;
1640         if (dma->stats.sent_cnt_max < dma->sent_cnt)
1641             dma->stats.sent_cnt_max = dma->sent_cnt;
1642     }
1643 
1644     return sent_pkt_cnt;
1645 }
1646 
1647 void *grspw_dma_open(void *d, int chan_no)
1648 {
1649     struct grspw_priv *priv = d;
1650     struct grspw_dma_priv *dma;
1651     int size;
1652 
1653     if ((chan_no < 0) || (priv->hwsup.ndma_chans <= chan_no))
1654         return NULL;
1655 
1656     dma = &priv->dma[chan_no];
1657 
1658     /* Take GRSPW lock */
1659     if (rtems_semaphore_obtain(grspw_sem, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1660         != RTEMS_SUCCESSFUL)
1661         return NULL;
1662 
1663     if (dma->open) {
1664         dma = NULL;
1665         goto out;
1666     }
1667 
1668     dma->started = 0;
1669 
1670     /* Set Default Configuration:
1671      *
1672      *  - MAX RX Packet Length = DEFAULT_RXMAX
1673      *  - Disable RX/TX IRQ generation
1674      *  - No spill (DMAFLAG_NO_SPILL)
1675      */
1676     dma->cfg.rxmaxlen = DEFAULT_RXMAX;
1677     dma->cfg.rx_irq_en_cnt = 0;
1678     dma->cfg.tx_irq_en_cnt = 0;
1679     dma->cfg.flags = DMAFLAG_NO_SPILL;
1680 
1681     /* set to NULL so that error exit works correctly */
1682     dma->sem_rxdma = RTEMS_ID_NONE;
1683     dma->sem_txdma = RTEMS_ID_NONE;
1684     dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1685     dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1686     dma->rx_ring_base = NULL;
1687 
1688     /* DMA Channel Semaphore created with count = 1 */
1689     if (rtems_semaphore_create(
1690         rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2), 1,
1691         RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1692         RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1693         RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_rxdma) != RTEMS_SUCCESSFUL) {
1694         dma->sem_rxdma = RTEMS_ID_NONE;
1695         goto err;
1696     }
1697     if (rtems_semaphore_create(
1698         rtems_build_name('S', 'D', '0' + priv->index, '0' + chan_no*2+1), 1,
1699         RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1700         RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1701         RTEMS_NO_PRIORITY_CEILING, 0, &dma->sem_txdma) != RTEMS_SUCCESSFUL) {
1702         dma->sem_txdma = RTEMS_ID_NONE;
1703         goto err;
1704     }
1705 
1706     /* Allocate memory for the two descriptor rings */
1707     size = sizeof(struct grspw_ring) * (GRSPW_RXBD_NR + GRSPW_TXBD_NR);
1708     dma->rx_ring_base = grlib_malloc(size);
1709     dma->tx_ring_base = (struct grspw_txring *)&dma->rx_ring_base[GRSPW_RXBD_NR];
1710     if (dma->rx_ring_base == NULL)
1711         goto err;
1712 
1713     /* Create DMA RX and TX Channel wait semaphores with count = 0 */
1714     if (rtems_semaphore_create(
1715         rtems_build_name('S', 'R', '0' + priv->index, '0' + chan_no), 0,
1716         RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1717         RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1718         RTEMS_NO_PRIORITY_CEILING, 0, &dma->rx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1719         dma->rx_wait.sem_wait = RTEMS_ID_NONE;
1720         goto err;
1721     }
1722     if (rtems_semaphore_create(
1723         rtems_build_name('S', 'T', '0' + priv->index, '0' + chan_no), 0,
1724         RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
1725         RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
1726         RTEMS_NO_PRIORITY_CEILING, 0, &dma->tx_wait.sem_wait) != RTEMS_SUCCESSFUL) {
1727         dma->tx_wait.sem_wait = RTEMS_ID_NONE;
1728         goto err;
1729     }
1730 
1731     /* Reset software structures */
1732     grspw_dma_reset(dma);
1733 
1734     /* Take the device */
1735     dma->open = 1;
1736 out:
1737     /* Return GRSPW Lock */
1738     rtems_semaphore_release(grspw_sem);
1739 
1740     return dma;
1741 
1742     /* initialization error happened */
1743 err:
1744     if (dma->sem_rxdma != RTEMS_ID_NONE)
1745         rtems_semaphore_delete(dma->sem_rxdma);
1746     if (dma->sem_txdma != RTEMS_ID_NONE)
1747         rtems_semaphore_delete(dma->sem_txdma);
1748     if (dma->rx_wait.sem_wait != RTEMS_ID_NONE)
1749         rtems_semaphore_delete(dma->rx_wait.sem_wait);
1750     if (dma->tx_wait.sem_wait != RTEMS_ID_NONE)
1751         rtems_semaphore_delete(dma->tx_wait.sem_wait);
1752     if (dma->rx_ring_base)
1753         free(dma->rx_ring_base);
1754     dma = NULL;
1755     goto out;
1756 }
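
/* Example (illustrative sketch, not part of the driver): opening and
 * closing DMA channel 0. Assumes "dev" is a device handle previously
 * obtained from the driver, e.g. via grspw_open().
 *
 *   void *chan = grspw_dma_open(dev, 0);
 *   if (chan == NULL)
 *       ... invalid channel number, already open or out of resources ...
 *   ...
 *   if (grspw_dma_close(chan) != 0)
 *       ... channel still started or threads blocked within driver ...
 */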
1757 
1758 /* Initialize Software Structures:
1759  *  - Clear all Queues 
1760  *  - init BD ring 
1761  *  - init IRQ counter
1762  *  - clear statistics counters
1763  *  - init wait structures and semaphores
1764  */
1765 STATIC void grspw_dma_reset(struct grspw_dma_priv *dma)
1766 {
1767     /* Empty RX and TX queues */
1768     grspw_list_clr(&dma->ready);
1769     grspw_list_clr(&dma->rx_sched);
1770     grspw_list_clr(&dma->recv);
1771     grspw_list_clr(&dma->send);
1772     grspw_list_clr(&dma->tx_sched);
1773     grspw_list_clr(&dma->sent);
1774     dma->ready_cnt = 0;
1775     dma->rx_sched_cnt = 0;
1776     dma->recv_cnt = 0;
1777     dma->send_cnt = 0;
1778     dma->tx_sched_cnt = 0;
1779     dma->sent_cnt = 0;
1780 
1781     dma->rx_irq_en_cnt_curr = 0;
1782     dma->tx_irq_en_cnt_curr = 0;
1783 
1784     grspw_bdrings_init(dma);
1785 
1786     dma->rx_wait.waiting = 0;
1787     dma->tx_wait.waiting = 0;
1788 
1789     grspw_dma_stats_clr(dma);
1790 }
1791 
1792 int grspw_dma_close(void *c)
1793 {
1794     struct grspw_dma_priv *dma = c;
1795 
1796     if (!dma->open)
1797         return 0;
1798 
1799     /* Take device lock - Wait until we get semaphore */
1800     if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1801         != RTEMS_SUCCESSFUL)
1802         return -1;
1803     if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1804         != RTEMS_SUCCESSFUL) {
1805         rtems_semaphore_release(dma->sem_rxdma);
1806         return -1;
1807     }
1808 
1809     /* Can not close active DMA channel. User must stop DMA and make sure
1810      * no threads are active/blocked within driver.
1811      */
1812     if (dma->started || dma->rx_wait.waiting || dma->tx_wait.waiting) {
1813         rtems_semaphore_release(dma->sem_txdma);
1814         rtems_semaphore_release(dma->sem_rxdma);
1815         return 1;
1816     }
1817 
1818     /* Free resources */
1819     rtems_semaphore_delete(dma->rx_wait.sem_wait);
1820     rtems_semaphore_delete(dma->tx_wait.sem_wait);
1821     /* Release and delete lock. Operations requiring lock will fail */
1822     rtems_semaphore_delete(dma->sem_txdma);
1823     rtems_semaphore_delete(dma->sem_rxdma);
1824     dma->sem_txdma = RTEMS_ID_NONE;
1825     dma->sem_rxdma = RTEMS_ID_NONE;
1826 
1827     /* Free memory */
1828     if (dma->rx_ring_base)
1829         free(dma->rx_ring_base);
1830     dma->rx_ring_base = NULL;
1831     dma->tx_ring_base = NULL;
1832 
1833     dma->open = 0;
1834     return 0;
1835 }
1836 
1837 unsigned int grspw_dma_enable_int(void *c, int rxtx, int force)
1838 {
1839     struct grspw_dma_priv *dma = c;
1840     int rc = 0;
1841     unsigned int ctrl, ctrl_old;
1842     SPIN_IRQFLAGS(irqflags);
1843 
1844     SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
1845     if (dma->started == 0) {
1846         rc = 1; /* DMA stopped */
1847         goto out;
1848     }
1849     ctrl = REG_READ(&dma->regs->ctrl);
1850     ctrl_old = ctrl;
1851 
1852     /* Read/Write DMA error ? */
1853     if (ctrl & GRSPW_DMA_STATUS_ERROR) {
1854         rc = 2; /* DMA error */
1855         goto out;
1856     }
1857 
1858     /* DMA has finished a TX/RX packet and user wants work-task to
1859      * take care of DMA table processing.
1860      */
1861     ctrl &= ~GRSPW_DMACTRL_AT;
1862 
1863     if ((rxtx & 1) == 0)
1864         ctrl &= ~GRSPW_DMACTRL_PR;
1865     else if (force || ((dma->cfg.rx_irq_en_cnt != 0) ||
1866          (dma->cfg.flags & DMAFLAG2_RXIE)))
1867         ctrl |= GRSPW_DMACTRL_RI;
1868 
1869     if ((rxtx & 2) == 0)
1870         ctrl &= ~GRSPW_DMACTRL_PS;
1871     else if (force || ((dma->cfg.tx_irq_en_cnt != 0) ||
1872          (dma->cfg.flags & DMAFLAG2_TXIE)))
1873         ctrl |= GRSPW_DMACTRL_TI;
1874 
1875     REG_WRITE(&dma->regs->ctrl, ctrl);
1876     /* Return which RX/TX packet status bits were set previously */
1877     rc = ctrl_old & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS);
1878 out:
1879     SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
1880     return rc;
1881 }
1882 
1883 /* Schedule a list of packets for transmission at some point in
1884  * the future.
1885  *
1886  * 1. Move transmitted packets to SENT List (SCHED->SENT)
1887  * 2. Add the requested packets to the SEND List (USER->SEND)
1888  * 3. Schedule as many packets as possible (SEND->SCHED)
1889  */
1890 int grspw_dma_tx_send(void *c, int opts, struct grspw_list *pkts, int count)
1891 {
1892     struct grspw_dma_priv *dma = c;
1893     int ret;
1894 
1895     /* Take DMA channel lock */
1896     if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1897         != RTEMS_SUCCESSFUL)
1898         return -1;
1899 
1900     if (dma->started == 0) {
1901         ret = 1; /* signal DMA has been stopped */
1902         goto out;
1903     }
1904     ret = 0;
1905 
1906     /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1907     if ((opts & 1) == 0)
1908         grspw_tx_process_scheduled(dma);
1909 
1910     /* 2. Add the requested packets to the SEND List (USER->SEND) */
1911     if (pkts && (count > 0)) {
1912         grspw_list_append_list(&dma->send, pkts);
1913         dma->send_cnt += count;
1914         if (dma->stats.send_cnt_max < dma->send_cnt)
1915             dma->stats.send_cnt_max = dma->send_cnt;
1916     }
1917 
1918     /* 3. Schedule as many packets as possible (SEND->SCHED) */
1919     if ((opts & 2) == 0)
1920         grspw_tx_schedule_send(dma);
1921 
1922 out:
1923     /* Unlock DMA channel */
1924     rtems_semaphore_release(dma->sem_txdma);
1925 
1926     return ret;
1927 }
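
/* Example (illustrative sketch, not part of the driver): queue a single
 * packet for transmission. Assumes "chan" was returned by
 * grspw_dma_open() and "pkt" points to a user-initialized
 * struct grspw_pkt with valid data/dlen (and optionally hdr/hlen).
 *
 *   struct grspw_list lst;
 *
 *   pkt->next = NULL;
 *   lst.head = lst.tail = pkt;
 *   if (grspw_dma_tx_send(chan, 0, &lst, 1) != 0)
 *       ... DMA stopped (1) or semaphore error (-1) ...
 */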
1928 
1929 int grspw_dma_tx_reclaim(void *c, int opts, struct grspw_list *pkts, int *count)
1930 {
1931     struct grspw_dma_priv *dma = c;
1932     struct grspw_pkt *pkt, *lastpkt;
1933     int cnt, started;
1934 
1935     /* Take DMA channel lock */
1936     if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1937         != RTEMS_SUCCESSFUL)
1938         return -1;
1939 
1940     /* 1. Move transmitted packets to SENT List (SCHED->SENT) */
1941     started = dma->started;
1942     if ((started > 0) && ((opts & 1) == 0))
1943         grspw_tx_process_scheduled(dma);
1944 
1945     /* 2. Move all/count SENT packets to the caller's list (SENT->USER) */
1946     if (pkts) {
1947         if ((count == NULL) || (*count == -1) ||
1948             (*count >= dma->sent_cnt)) {
1949             /* Move all SENT Packets */
1950             *pkts = dma->sent;
1951             grspw_list_clr(&dma->sent);
1952             if (count)
1953                 *count = dma->sent_cnt;
1954             dma->sent_cnt = 0;
1955         } else {
1956             /* Move a number of SENT Packets */
1957             pkts->head = pkt = lastpkt = dma->sent.head;
1958             cnt = 0;
1959             while (cnt < *count) {
1960                 lastpkt = pkt;
1961                 pkt = pkt->next;
1962                 cnt++;
1963             }
1964             if (cnt > 0) {
1965                 pkts->tail = lastpkt;
1966                 grspw_list_remove_head_list(&dma->sent, pkts);
1967                 dma->sent_cnt -= cnt;
1968             } else {
1969                 grspw_list_clr(pkts);
1970             }
1971         }
1972     } else if (count) {
1973         *count = 0;
1974     }
1975 
1976     /* 3. Schedule as many packets as possible (SEND->SCHED) */
1977     if ((started > 0) && ((opts & 2) == 0))
1978         grspw_tx_schedule_send(dma);
1979 
1980     /* Unlock DMA channel */
1981     rtems_semaphore_release(dma->sem_txdma);
1982 
1983     return (~started) & 1; /* signal DMA has been stopped */
1984 }
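
/* Example (illustrative sketch, not part of the driver): reclaim all
 * sent packet buffers and check whether each was actually transmitted.
 *
 *   struct grspw_list lst;
 *   struct grspw_pkt *p;
 *   int cnt = -1; (-1 means take all packets in the SENT queue)
 *
 *   if (grspw_dma_tx_reclaim(chan, 0, &lst, &cnt) >= 0) {
 *       for (p = lst.head; cnt > 0; p = p->next, cnt--) {
 *           if (p->flags & TXPKT_FLAG_TX)
 *               ... transmitted, buffer may be reused ...
 *       }
 *   }
 */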
1985 
1986 void grspw_dma_tx_count(void *c, int *send, int *sched, int *sent, int *hw)
1987 {
1988     struct grspw_dma_priv *dma = c;
1989     int sched_cnt, diff;
1990     unsigned int hwbd;
1991     struct grspw_txbd *tailbd;
1992 
1993     /* Take device lock - Wait until we get semaphore.
1994      * The lock is taken so that the counters are in sync with each other
1995      * and that DMA descriptor table and tx_ring_tail is not being updated
1996      * during HW counter processing in this function.
1997      */
1998     if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
1999         != RTEMS_SUCCESSFUL)
2000         return;
2001 
2002     if (send)
2003         *send = dma->send_cnt;
2004     sched_cnt = dma->tx_sched_cnt;
2005     if (sched)
2006         *sched = sched_cnt;
2007     if (sent)
2008         *sent = dma->sent_cnt;
2009     if (hw) {
2010         /* Calculate number of descriptors (processed by HW) between
2011          * HW pointer and oldest SW pointer.
2012          */
2013         hwbd = REG_READ(&dma->regs->txdesc);
2014         tailbd = dma->tx_ring_tail->bd;
2015         diff = ((hwbd - (unsigned int)tailbd) / GRSPW_TXBD_SIZE) &
2016             (GRSPW_TXBD_NR - 1);
2017         /* Handle special case when HW and SW pointers are equal
2018          * because all TX descriptors have been processed by HW.
2019          */
2020         if ((diff == 0) && (sched_cnt == GRSPW_TXBD_NR) &&
2021             ((BD_READ(&tailbd->ctrl) & GRSPW_TXBD_EN) == 0)) {
2022             diff = GRSPW_TXBD_NR;
2023         }
2024         *hw = diff;
2025     }
2026 
2027     /* Unlock DMA channel */
2028     rtems_semaphore_release(dma->sem_txdma);
2029 }
2030 
2031 static inline int grspw_tx_wait_eval(struct grspw_dma_priv *dma)
2032 {
2033     int send_val, sent_val;
2034 
2035     if (dma->tx_wait.send_cnt >= (dma->send_cnt + dma->tx_sched_cnt))
2036         send_val = 1;
2037     else
2038         send_val = 0;
2039 
2040     if (dma->tx_wait.sent_cnt <= dma->sent_cnt)
2041         sent_val = 1;
2042     else
2043         sent_val = 0;
2044 
2045     /* AND or OR ? */
2046     if (dma->tx_wait.op == 0)
2047         return send_val & sent_val; /* AND */
2048     else
2049         return send_val | sent_val; /* OR */
2050 }
2051 
2052 /* Block until the condition is met: send_cnt or fewer packets are queued
2053  * in the "Send and Scheduled" queues, combined by op (0=AND, 1=OR) with
2054  * sent_cnt or more packets having been sent (Sent queue).
2055  * If a link error occurs and Stop on Link error is configured, this
2056  * function will also return to the caller.
2057  */
2058 int grspw_dma_tx_wait(void *c, int send_cnt, int op, int sent_cnt, int timeout)
2059 {
2060     struct grspw_dma_priv *dma = c;
2061     int ret, rc, initialized = 0;
2062 
2063     if (timeout == 0)
2064         timeout = RTEMS_NO_TIMEOUT;
2065 
2066 check_condition:
2067 
2068     /* Take DMA channel lock */
2069     if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2070         != RTEMS_SUCCESSFUL)
2071         return -1;
2072 
2073     /* Check that no other thread is waiting; this driver only supports
2074      * one waiter at a time.
2075      */
2076     if (initialized == 0 && dma->tx_wait.waiting) {
2077         ret = 3;
2078         goto out_release;
2079     }
2080 
2081     /* Stop if link error or similar (DMA stopped), abort */
2082     if (dma->started == 0) {
2083         ret = 1;
2084         goto out_release;
2085     }
2086 
2087     /* Set up Condition */
2088     dma->tx_wait.send_cnt = send_cnt;
2089     dma->tx_wait.op = op;
2090     dma->tx_wait.sent_cnt = sent_cnt;
2091 
2092     if (grspw_tx_wait_eval(dma) == 0) {
2093         /* Prepare Wait */
2094         initialized = 1;
2095         dma->tx_wait.waiting = 1;
2096 
2097         /* Release DMA channel lock */
2098         rtems_semaphore_release(dma->sem_txdma);
2099 
2100         /* Try to take the Wait lock; if this fails the link may have
2101          * gone down or the user stopped this DMA channel.
2102          */
2103         rc = rtems_semaphore_obtain(dma->tx_wait.sem_wait, RTEMS_WAIT,
2104                         timeout);
2105         if (rc == RTEMS_TIMEOUT) {
2106             ret = 2;
2107             goto out;
2108         } else if (rc == RTEMS_UNSATISFIED ||
2109                    rc == RTEMS_OBJECT_WAS_DELETED) {
2110             ret = 1; /* sem was flushed/deleted, means DMA stop */
2111             goto out;
2112         } else if (rc != RTEMS_SUCCESSFUL) {
2113             /* Unknown Error */
2114             ret = -1;
2115             goto out;
2116         } else if (dma->started == 0) {
2117             ret = 1;
2118             goto out;
2119         }
2120 
2121         /* Check condition once more */
2122         goto check_condition;
2123     }
2124 
2125     ret = 0;
2126 
2127 out_release:
2128     /* Unlock DMA channel */
2129     rtems_semaphore_release(dma->sem_txdma);
2130 
2131 out:
2132     if (initialized)
2133         dma->tx_wait.waiting = 0;
2134     return ret;
2135 }
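
/* Example (illustrative sketch, not part of the driver): block until
 * both the SEND and SCHED queues are drained. With send_cnt=0, op=0
 * (AND) and sent_cnt=0 the "sent" condition is trivially true, so the
 * call returns first when send_cnt + tx_sched_cnt reaches zero.
 *
 *   int rc = grspw_dma_tx_wait(chan, 0, 0, 0, 0); (timeout 0: wait forever)
 *   if (rc == 1)
 *       ... DMA channel was stopped while waiting ...
 */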
2136 
2137 int grspw_dma_rx_recv(void *c, int opts, struct grspw_list *pkts, int *count)
2138 {
2139     struct grspw_dma_priv *dma = c;
2140     struct grspw_pkt *pkt, *lastpkt;
2141     int cnt, started;
2142 
2143     /* Take DMA channel lock */
2144     if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2145         != RTEMS_SUCCESSFUL)
2146         return -1;
2147 
2148     /* 1. Move Scheduled packets to RECV List (SCHED->RECV) */
2149     started = dma->started;
2150     if (((opts & 1) == 0) && (started > 0))
2151         grspw_rx_process_scheduled(dma);
2152 
2153     /* 2. Move all/count RECV packets to the caller's list (RECV->USER) */
2154     if (pkts) {
2155         if ((count == NULL) || (*count == -1) ||
2156             (*count >= dma->recv_cnt)) {
2157             /* Move all Received packets */
2158             *pkts = dma->recv;
2159             grspw_list_clr(&dma->recv);
2160             if (count)
2161                 *count = dma->recv_cnt;
2162             dma->recv_cnt = 0;
2163         } else {
2164             /* Move a number of RECV Packets */
2165             pkts->head = pkt = lastpkt = dma->recv.head;
2166             cnt = 0;
2167             while (cnt < *count) {
2168                 lastpkt = pkt;
2169                 pkt = pkt->next;
2170                 cnt++;
2171             }
2172             if (cnt > 0) {
2173                 pkts->tail = lastpkt;
2174                 grspw_list_remove_head_list(&dma->recv, pkts);
2175                 dma->recv_cnt -= cnt;
2176             } else {
2177                 grspw_list_clr(pkts);
2178             }
2179         }
2180     } else if (count) {
2181         *count = 0;
2182     }
2183 
2184     /* 3. Schedule as many free packet buffers as possible (READY->SCHED) */
2185     if (((opts & 2) == 0) && (started > 0))
2186         grspw_rx_schedule_ready(dma);
2187 
2188     /* Unlock DMA channel */
2189     rtems_semaphore_release(dma->sem_rxdma);
2190 
2191     return (~started) & 1;
2192 }
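
/* Example (illustrative sketch, not part of the driver): fetch all
 * packets currently in the RECV queue and process them. Assumes the
 * driver has filled in data/dlen of each received packet.
 *
 *   struct grspw_list lst;
 *   struct grspw_pkt *p;
 *   int cnt = -1; (-1 means take all packets in the RECV queue)
 *
 *   if (grspw_dma_rx_recv(chan, 0, &lst, &cnt) >= 0) {
 *       for (p = lst.head; cnt > 0; p = p->next, cnt--)
 *           ... process p->data / p->dlen, then hand the buffer back
 *               via grspw_dma_rx_prepare() ...
 *   }
 */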
2193 
2194 int grspw_dma_rx_prepare(void *c, int opts, struct grspw_list *pkts, int count)
2195 {
2196     struct grspw_dma_priv *dma = c;
2197     int ret;
2198 
2199     /* Take DMA channel lock */
2200     if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2201         != RTEMS_SUCCESSFUL)
2202         return -1;
2203 
2204     if (dma->started == 0) {
2205         ret = 1;
2206         goto out;
2207     }
2208 
2209     /* 1. Move Received packets to RECV List (SCHED->RECV) */
2210     if ((opts & 1) == 0)
2211         grspw_rx_process_scheduled(dma);
2212 
2213     /* 2. Add the "free/ready" packet buffers to the READY List (USER->READY) */
2214     if (pkts && (count > 0)) {
2215         grspw_list_append_list(&dma->ready, pkts);
2216         dma->ready_cnt += count;
2217         if (dma->stats.ready_cnt_max < dma->ready_cnt)
2218             dma->stats.ready_cnt_max = dma->ready_cnt;
2219     }
2220 
2221     /* 3. Schedule as many packets as possible (READY->SCHED) */
2222     if ((opts & 2) == 0)
2223         grspw_rx_schedule_ready(dma);
2224 
2225     ret = 0;
2226 out:
2227     /* Unlock DMA channel */
2228     rtems_semaphore_release(dma->sem_rxdma);
2229 
2230     return ret;
2231 }
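
/* Example (illustrative sketch, not part of the driver): hand a chain
 * of N empty buffers to the driver so the hardware can receive into
 * them. Assumes pkts[] is an application-owned array of grspw_pkt
 * structures whose data pointers reference valid RX buffers.
 *
 *   struct grspw_list lst;
 *   int i;
 *
 *   for (i = 0; i < N; i++)
 *       pkts[i].next = (i == N - 1) ? NULL : &pkts[i + 1];
 *   lst.head = &pkts[0];
 *   lst.tail = &pkts[N - 1];
 *   grspw_dma_rx_prepare(chan, 0, &lst, N);
 */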
2232 
2233 void grspw_dma_rx_count(void *c, int *ready, int *sched, int *recv, int *hw)
2234 {
2235     struct grspw_dma_priv *dma = c;
2236     int sched_cnt, diff;
2237     unsigned int hwbd;
2238     struct grspw_rxbd *tailbd;
2239 
2240     /* Take device lock - Wait until we get semaphore.
2241      * The lock is taken so that the counters are in sync with each other
2242      * and that DMA descriptor table and rx_ring_tail is not being updated
2243      * during HW counter processing in this function.
2244      */
2245     if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2246         != RTEMS_SUCCESSFUL)
2247         return;
2248 
2249     if (ready)
2250         *ready = dma->ready_cnt;
2251     sched_cnt = dma->rx_sched_cnt;
2252     if (sched)
2253         *sched = sched_cnt;
2254     if (recv)
2255         *recv = dma->recv_cnt;
2256     if (hw) {
2257         /* Calculate number of descriptors (processed by HW) between
2258          * HW pointer and oldest SW pointer.
2259          */
2260         hwbd = REG_READ(&dma->regs->rxdesc);
2261         tailbd = dma->rx_ring_tail->bd;
2262         diff = ((hwbd - (unsigned int)tailbd) / GRSPW_RXBD_SIZE) &
2263             (GRSPW_RXBD_NR - 1);
2264         /* Handle special case when HW and SW pointers are equal
2265          * because all RX descriptors have been processed by HW.
2266          */
2267         if ((diff == 0) && (sched_cnt == GRSPW_RXBD_NR) &&
2268             ((BD_READ(&tailbd->ctrl) & GRSPW_RXBD_EN) == 0)) {
2269             diff = GRSPW_RXBD_NR;
2270         }
2271         *hw = diff;
2272     }
2273 
2274     /* Unlock DMA channel */
2275     rtems_semaphore_release(dma->sem_rxdma);
2276 }
2277 
2278 static inline int grspw_rx_wait_eval(struct grspw_dma_priv *dma)
2279 {
2280     int ready_val, recv_val;
2281 
2282     if (dma->rx_wait.ready_cnt >= (dma->ready_cnt + dma->rx_sched_cnt))
2283         ready_val = 1;
2284     else
2285         ready_val = 0;
2286 
2287     if (dma->rx_wait.recv_cnt <= dma->recv_cnt)
2288         recv_val = 1;
2289     else
2290         recv_val = 0;
2291 
2292     /* AND or OR ? */
2293     if (dma->rx_wait.op == 0)
2294         return ready_val & recv_val; /* AND */
2295     else
2296         return ready_val | recv_val; /* OR */
2297 }
2298 
2299 /* Block until the condition is met: recv_cnt or more packets are queued
2300  * in the RECV queue, combined by op (0=AND, 1=OR) with ready_cnt or fewer
2301  * packet buffers being available in the "READY and Scheduled" queues.
2302  * If a link error occurs and Stop on Link error is configured, this
2303  * function will also return to the caller, however with an error.
2304  */
2305 int grspw_dma_rx_wait(void *c, int recv_cnt, int op, int ready_cnt, int timeout)
2306 {
2307     struct grspw_dma_priv *dma = c;
2308     int ret, rc, initialized = 0;
2309 
2310     if (timeout == 0)
2311         timeout = RTEMS_NO_TIMEOUT;
2312 
2313 check_condition:
2314 
2315     /* Take DMA channel lock */
2316     if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2317         != RTEMS_SUCCESSFUL)
2318         return -1;
2319 
2320     /* Check that no other thread is waiting; this driver only supports
2321      * one waiter at a time.
2322      */
2323     if (initialized == 0 && dma->rx_wait.waiting) {
2324         ret = 3;
2325         goto out_release;
2326     }
2327 
2328     /* Stop if link error or similar (DMA stopped), abort */
2329     if (dma->started == 0) {
2330         ret = 1;
2331         goto out_release;
2332     }
2333 
2334     /* Set up Condition */
2335     dma->rx_wait.recv_cnt = recv_cnt;
2336     dma->rx_wait.op = op;
2337     dma->rx_wait.ready_cnt = ready_cnt;
2338 
2339     if (grspw_rx_wait_eval(dma) == 0) {
2340         /* Prepare Wait */
2341         initialized = 1;
2342         dma->rx_wait.waiting = 1;
2343 
2344         /* Release channel lock */
2345         rtems_semaphore_release(dma->sem_rxdma);
2346 
2347         /* Try to take the Wait lock; if this fails the link may have
2348          * gone down or the user stopped this DMA channel.
2349          */
2350         rc = rtems_semaphore_obtain(dma->rx_wait.sem_wait, RTEMS_WAIT,
2351                                    timeout);
2352         if (rc == RTEMS_TIMEOUT) {
2353             ret = 2;
2354             goto out;
2355         } else if (rc == RTEMS_UNSATISFIED ||
2356                    rc == RTEMS_OBJECT_WAS_DELETED) {
2357             ret = 1; /* sem was flushed/deleted, means DMA stop */
2358             goto out;
2359         } else if (rc != RTEMS_SUCCESSFUL) {
2360             /* Unknown Error */
2361             ret = -1;
2362             goto out;
2363         } else if (dma->started == 0) {
2364             ret = 1;
2365             goto out;
2366         }
2367 
2368         /* Check condition once more */
2369         goto check_condition;
2370     }
2371 
2372     ret = 0;
2373 
2374 out_release:
2375     /* Unlock DMA channel */
2376     rtems_semaphore_release(dma->sem_rxdma);
2377 
2378 out:
2379     if (initialized)
2380         dma->rx_wait.waiting = 0;
2381     return ret;
2382 }
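
/* Example (illustrative sketch, not part of the driver): block for at
 * most 100 ticks until at least 4 packets sit in the RECV queue. The
 * large ready_cnt makes the "ready" condition always true, so the AND
 * (op=0) reduces to the RECV test alone.
 *
 *   int rc = grspw_dma_rx_wait(chan, 4, 0, 0x7fffffff, 100);
 *   if (rc == 2)
 *       ... timeout, fewer than 4 packets received ...
 */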
2383 
2384 int grspw_dma_config(void *c, struct grspw_dma_config *cfg)
2385 {
2386     struct grspw_dma_priv *dma = c;
2387 
2388     if (dma->started || !cfg)
2389         return -1;
2390 
2391     if (cfg->flags & ~(DMAFLAG_MASK | DMAFLAG2_MASK))
2392         return -1;
2393 
2394     /* Update Configuration */
2395     memcpy(&dma->cfg, cfg, sizeof(*cfg));
2396 
2397     return 0;
2398 }
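
/* Example (illustrative sketch, not part of the driver): adjust the
 * channel configuration before starting it; grspw_dma_config() fails
 * once the channel has been started. The field values are assumptions
 * for illustration only.
 *
 *   struct grspw_dma_config cfg;
 *
 *   grspw_dma_config_read(chan, &cfg); (start from current settings)
 *   cfg.rxmaxlen = 4096;               (max RX packet length in bytes)
 *   cfg.rx_irq_en_cnt = 8;             (IRQ on every 8th RX descriptor)
 *   if (grspw_dma_config(chan, &cfg) != 0)
 *       ... channel already started, or invalid flags ...
 */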
2399 
2400 void grspw_dma_config_read(void *c, struct grspw_dma_config *cfg)
2401 {
2402     struct grspw_dma_priv *dma = c;
2403 
2404     /* Copy Current Configuration */
2405     memcpy(cfg, &dma->cfg, sizeof(*cfg));
2406 }
2407 
2408 void grspw_dma_stats_read(void *c, struct grspw_dma_stats *sts)
2409 {
2410     struct grspw_dma_priv *dma = c;
2411 
2412     memcpy(sts, &dma->stats, sizeof(dma->stats));
2413 }
2414 
2415 void grspw_dma_stats_clr(void *c)
2416 {
2417     struct grspw_dma_priv *dma = c;
2418 
2419     /* Clear most of the statistics */  
2420     memset(&dma->stats, 0, sizeof(dma->stats));
2421 
2422     /* Init proper default values so that comparisons will work the
2423      * first time.
2424      */
2425     dma->stats.send_cnt_min = 0x3fffffff;
2426     dma->stats.tx_sched_cnt_min = 0x3fffffff;
2427     dma->stats.ready_cnt_min = 0x3fffffff;
2428     dma->stats.rx_sched_cnt_min = 0x3fffffff;
2429 }
2430 
2431 int grspw_dma_start(void *c)
2432 {
2433     struct grspw_dma_priv *dma = c;
2434     struct grspw_dma_regs *dregs = dma->regs;
2435     unsigned int ctrl;
2436     SPIN_IRQFLAGS(irqflags);
2437 
2438     if (dma->started)
2439         return 0;
2440 
2441     /* Initialize Software Structures:
2442      *  - Clear all Queues
2443      *  - init BD ring 
2444      *  - init IRQ counter
2445      *  - clear statistics counters
2446      *  - init wait structures and semaphores
2447      */
2448     grspw_dma_reset(dma);
2449 
2450     /* RX (RE/RD) and TX are not enabled until the user fills the SEND
2451      * and READY queues with SpaceWire packet buffers. So we do not have
2452      * to worry about IRQs for this channel just yet. However, other DMA
2453      * channels may be active.
2454      *
2455      * Some functionality that is not changed during started mode is set up
2456      * once and for all here:
2457      *
2458      *   - RX MAX Packet length
2459      *   - TX Descriptor base address to first BD in TX ring (not enabled)
2460      *   - RX Descriptor base address to first BD in RX ring (not enabled)
2461      *   - IRQs (TX DMA, RX DMA, DMA ERROR)
2462      *   - Strip PID
2463      *   - Strip Address
2464      *   - No Spill
2465      *   - Receiver Enable
2466      *   - disable on link error (LE)
2467      *
2468      * Note that the address register and the address enable bit in DMACTRL
2469      * register must be left untouched, they are configured on a GRSPW
2470      * core level.
2471      *
2472      * Note that the receiver is enabled here, but since descriptors are
2473      * not enabled the GRSPW core may stop/pause RX (if NS bit set) until
2474      * descriptors are enabled or it may ignore RX packets (NS=0) until
2475      * descriptors are enabled (writing RD bit).
2476      */
2477     REG_WRITE(&dregs->txdesc, dma->tx_bds_hwa);
2478     REG_WRITE(&dregs->rxdesc, dma->rx_bds_hwa);
2479 
2480     /* MAX Packet length */
2481     REG_WRITE(&dma->regs->rxmax, dma->cfg.rxmaxlen);
2482 
2483     ctrl =  GRSPW_DMACTRL_AI | GRSPW_DMACTRL_PS | GRSPW_DMACTRL_PR |
2484         GRSPW_DMACTRL_TA | GRSPW_DMACTRL_RA | GRSPW_DMACTRL_RE |
2485         (dma->cfg.flags & DMAFLAG_MASK) << GRSPW_DMACTRL_NS_BIT;
2486     if (dma->core->dis_link_on_err & LINKOPTS_DIS_ONERR)
2487         ctrl |= GRSPW_DMACTRL_LE;
2488     if (dma->cfg.rx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_RXIE)
2489         ctrl |= GRSPW_DMACTRL_RI;
2490     if (dma->cfg.tx_irq_en_cnt != 0 || dma->cfg.flags & DMAFLAG2_TXIE)
2491         ctrl |= GRSPW_DMACTRL_TI;
2492     SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2493     ctrl |= REG_READ(&dma->regs->ctrl) & GRSPW_DMACTRL_EN;
2494     REG_WRITE(&dregs->ctrl, ctrl);
2495     SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2496 
2497     dma->started = 1; /* open up other DMA interfaces */
2498 
2499     return 0;
2500 }
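
/* Example (illustrative sketch, not part of the driver): typical DMA
 * channel bring-up and shutdown order using the functions above.
 *
 *   chan = grspw_dma_open(dev, 0);
 *   grspw_dma_config(chan, &cfg);            (optional, defaults else)
 *   grspw_dma_start(chan);
 *   grspw_dma_rx_prepare(chan, 0, &rxlst, nbufs);
 *   grspw_dma_tx_send(chan, 0, &txlst, npkts);
 *   ...
 *   grspw_dma_stop(chan);
 *   grspw_dma_close(chan);
 */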
2501 
2502 STATIC void grspw_dma_stop_locked(struct grspw_dma_priv *dma)
2503 {
2504     SPIN_IRQFLAGS(irqflags);
2505 
2506     if (dma->started == 0)
2507         return;
2508     dma->started = 0;
2509 
2510     SPIN_LOCK_IRQ(&dma->core->devlock, irqflags);
2511     grspw_hw_dma_stop(dma);
2512     SPIN_UNLOCK_IRQ(&dma->core->devlock, irqflags);
2513 
2514     /* From here on no more packets will be sent. However,
2515      * there may still exist scheduled packets that have been
2516      * sent, and packets in the SEND Queue waiting for free
2517      * descriptors. All packets are moved to the SENT Queue
2518      * so that the user may get their buffers back; the user
2519      * must look at the TXPKT_FLAG_TX flag to determine
2520      * if a packet was actually sent or not.
2521      */
2522 
2523     /* Retrieve all sent packets from the scheduled queue */
2524     grspw_tx_process_scheduled(dma);
2525 
2526     /* Move un-sent packets in the SEND and SCHED queues to the
2527      * SENT Queue (they are never marked sent).
2528      */
2529     if (!grspw_list_is_empty(&dma->tx_sched)) {
2530         grspw_list_append_list(&dma->sent, &dma->tx_sched);
2531         grspw_list_clr(&dma->tx_sched);
2532         dma->sent_cnt += dma->tx_sched_cnt;
2533         dma->tx_sched_cnt = 0;
2534     }
2535     if (!grspw_list_is_empty(&dma->send)) {
2536         grspw_list_append_list(&dma->sent, &dma->send);
2537         grspw_list_clr(&dma->send);
2538         dma->sent_cnt += dma->send_cnt;
2539         dma->send_cnt = 0;
2540     }
2541 
2542     /* Similar for RX */
2543     grspw_rx_process_scheduled(dma);
2544     if (!grspw_list_is_empty(&dma->rx_sched)) {
2545         grspw_list_append_list(&dma->recv, &dma->rx_sched);
2546         grspw_list_clr(&dma->rx_sched);
2547         dma->recv_cnt += dma->rx_sched_cnt;
2548         dma->rx_sched_cnt = 0;
2549     }
2550     if (!grspw_list_is_empty(&dma->ready)) {
2551         grspw_list_append_list(&dma->recv, &dma->ready);
2552         grspw_list_clr(&dma->ready);
2553         dma->recv_cnt += dma->ready_cnt;
2554         dma->ready_cnt = 0;
2555     }
2556 
2557     /* Throw out blocked threads */
2558     rtems_semaphore_flush(dma->rx_wait.sem_wait);
2559     rtems_semaphore_flush(dma->tx_wait.sem_wait);
2560 }
2561 
2562 void grspw_dma_stop(void *c)
2563 {
2564     struct grspw_dma_priv *dma = c;
2565 
2566     /* If DMA channel is closed we should not access the semaphore */
2567     if (!dma->open)
2568         return;
2569 
2570     /* Take DMA Channel lock */
2571     if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2572         != RTEMS_SUCCESSFUL)
2573         return;
2574     if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2575         != RTEMS_SUCCESSFUL) {
2576         rtems_semaphore_release(dma->sem_rxdma);
2577         return;
2578     }
2579 
2580     grspw_dma_stop_locked(dma);
2581 
2582     rtems_semaphore_release(dma->sem_txdma);
2583     rtems_semaphore_release(dma->sem_rxdma);
2584 }
2585 
2586 /* Do general work, invoked indirectly from ISR */
2587 static void grspw_work_shutdown_func(struct grspw_priv *priv)
2588 {
2589     int i;
2590 
2591     /* Link is down for some reason, and the user has configured
2592      * that we stop all (open) DMA channels and throw out all their
2593      * blocked threads.
2594      */
2595     for (i=0; i<priv->hwsup.ndma_chans; i++)
2596         grspw_dma_stop(&priv->dma[i]);
2597     grspw_hw_stop(priv);
2598 }
2599 
2600 /* Do DMA work on one channel, invoked indirectly from ISR */
2601 static void grspw_work_dma_func(struct grspw_dma_priv *dma, unsigned int msg)
2602 {
2603     int tx_cond_true, rx_cond_true, rxtx;
2604 
2605     /* If DMA channel is closed we should not access the semaphore */
2606     if (dma->open == 0)
2607         return;
2608 
2609     dma->stats.irq_cnt++;
2610 
2611     /* Look at the cause we were woken up for and clear the source */
2612     rxtx = 0;
2613     if (msg & WORK_DMA_RX_MASK)
2614         rxtx |= 1;
2615     if (msg & WORK_DMA_TX_MASK)
2616         rxtx |= 2;
2617     switch (grspw_dma_enable_int(dma, rxtx, 0)) {
2618     case 1:
2619         /* DMA stopped */
2620         return;
2621     case 2:
2622         /* DMA error -> Stop DMA channel (both RX and TX) */
2623         if (msg & WORK_DMA_ER_MASK) {
2624             /* DMA error and user wants work-task to handle error */
2625             grspw_dma_stop(dma);
2626             grspw_work_event(WORKTASK_EV_DMA_STOP, msg);
2627         }
2628         return;
2629     default:
2630         break;
2631     }
2632     if (msg == 0)
2633         return;
2634 
2635     rx_cond_true = 0;
2636     tx_cond_true = 0;
2637 
2638     if ((dma->cfg.flags & DMAFLAG2_IRQD_MASK) == DMAFLAG2_IRQD_BOTH) {
2639         /* If the ISR disables both interrupt sources simultaneously,
2640          * the re-enabling must also cover both sources to avoid missing
2641          * interrupts. Both RX and TX processing will therefore be
2642          * forced.
2643          */
2644         msg |= WORK_DMA_RX_MASK | WORK_DMA_TX_MASK;
2645     }
2646 
2647     if (msg & WORK_DMA_RX_MASK) {
2648         /* Do RX Work */
2649 
2650         /* Take DMA channel RX lock */
2651         if (rtems_semaphore_obtain(dma->sem_rxdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2652             != RTEMS_SUCCESSFUL)
2653             return;
2654 
2655         dma->stats.rx_work_cnt++;
2656         grspw_rx_process_scheduled(dma);
2657         if (dma->started) {
2658             dma->stats.rx_work_enabled +=
2659                 grspw_rx_schedule_ready(dma);
2660             /* Check to see if the condition for waking the blocked
2661              * USER task is fulfilled.
2662              */
2663             if (dma->rx_wait.waiting)
2664                 rx_cond_true = grspw_rx_wait_eval(dma);
2665         }
2666         rtems_semaphore_release(dma->sem_rxdma);
2667     }
2668 
2669     if (msg & WORK_DMA_TX_MASK) {
2670         /* Do TX Work */
2671 
2672         /* Take DMA channel TX lock */
2673         if (rtems_semaphore_obtain(dma->sem_txdma, RTEMS_WAIT, RTEMS_NO_TIMEOUT)
2674             != RTEMS_SUCCESSFUL)
2675             return;
2676 
2677         dma->stats.tx_work_cnt++;
2678         grspw_tx_process_scheduled(dma);
2679         if (dma->started) {
2680             dma->stats.tx_work_enabled += 
2681                 grspw_tx_schedule_send(dma);
2682             /* Check to see if the condition for waking the blocked
2683              * USER task is fulfilled.
2684              */
2685             if (dma->tx_wait.waiting)
2686                 tx_cond_true = grspw_tx_wait_eval(dma);
2687         }
2688         rtems_semaphore_release(dma->sem_txdma);
2689     }
2690 
2691     if (rx_cond_true)
2692         rtems_semaphore_release(dma->rx_wait.sem_wait);
2693 
2694     if (tx_cond_true)
2695         rtems_semaphore_release(dma->tx_wait.sem_wait);
2696 }
2697 
2698 /* The work task receives work from the work message queue posted by
2699  * the ISR.
2700  */
2701 void grspw_work_func(rtems_id msgQ)
2702 {
2703     unsigned int message = 0, msg;
2704     size_t size;
2705     struct grspw_priv *priv;
2706     int i;
2707 
2708     /* Wait for ISR to schedule work */
2709     while (rtems_message_queue_receive(msgQ, &message, &size,
2710            RTEMS_WAIT, RTEMS_NO_TIMEOUT) == RTEMS_SUCCESSFUL) {
2711         if (message & WORK_QUIT_TASK)
2712             break;
2713 
2714         /* Handle work */
2715         priv = priv_tab[message >> WORK_CORE_BIT];
2716         if (message & WORK_SHUTDOWN) {
2717             grspw_work_shutdown_func(priv);
2718                 
2719             grspw_work_event(WORKTASK_EV_SHUTDOWN, message);
2720         } else if (message & WORK_DMA_MASK) {
2721             for (i = 0; i < priv->hwsup.ndma_chans; i++) {
2722                 msg = message &
2723                       (WORK_CORE_MASK | WORK_DMA_CHAN_MASK(i));
2724                 if (msg)
2725                     grspw_work_dma_func(&priv->dma[i], msg);
2726             }
2727         }
2728         message = 0;
2729     }
2730 
2731     if (message & WORK_FREE_MSGQ)
2732         rtems_message_queue_delete(msgQ);
2733 
2734     grspw_work_event(WORKTASK_EV_QUIT, message);
2735     rtems_task_exit();
2736 }
2737 
2738 STATIC void grspw_isr(void *data)
2739 {
2740     struct grspw_priv *priv = data;
2741     unsigned int dma_stat, stat, stat_clrmsk, ctrl, icctrl, timecode, irqs;
2742     unsigned int rxirq, rxack, intto;
2743     int i, handled = 0, call_user_int_isr;
2744     unsigned int message = WORK_NONE, dma_en;
2745     SPIN_ISR_IRQFLAGS(irqflags);
2746 
2747     /* Get Status from Hardware */
2748     stat = REG_READ(&priv->regs->status);
2749     stat_clrmsk = stat & (GRSPW_STS_TO | GRSPW_STAT_ERROR) &
2750             (GRSPW_STS_TO | priv->stscfg);
2751 
2752     /* Make sure to put the timecode handling first in order to get the
2753      * smallest possible interrupt latency
2754      */
2755     if ((stat & GRSPW_STS_TO) && (priv->tcisr != NULL)) {
2756         ctrl = REG_READ(&priv->regs->ctrl);
2757         if (ctrl & GRSPW_CTRL_TQ) {
2758             /* Timecode received. Let custom function handle this */
2759             timecode = REG_READ(&priv->regs->time) &
2760                     (GRSPW_TIME_CTRL | GRSPW_TIME_TCNT);
2761             (priv->tcisr)(priv->tcisr_arg, timecode);
2762         }
2763     }
2764 
2765     /* Get Interrupt status from hardware */
2766     icctrl = REG_READ(&priv->regs->icctrl);
2767     if ((icctrl & GRSPW_ICCTRL_IRQSRC_MASK) && (priv->icisr != NULL)) {
2768         call_user_int_isr = 0;
2769         rxirq = rxack = intto = 0;
2770 
2771         if ((icctrl & GRSPW_ICCTRL_IQ) &&
2772             (rxirq = REG_READ(&priv->regs->icrx)) != 0)
2773             call_user_int_isr = 1;
2774 
2775         if ((icctrl & GRSPW_ICCTRL_AQ) &&
2776             (rxack = REG_READ(&priv->regs->icack)) != 0)
2777             call_user_int_isr = 1;
2778 
2779         if ((icctrl & GRSPW_ICCTRL_TQ) &&
2780             (intto = REG_READ(&priv->regs->ictimeout)) != 0)
2781             call_user_int_isr = 1;          
2782 
2783         /* Let custom functions handle this POTENTIAL SPW interrupt. The
2784          * user function is called even if no such IRQ has happened!
2785          * User must make sure to clear all interrupts that have been
2786          * handled from the three registers by writing a one.
2787          */
2788         if (call_user_int_isr)
2789             priv->icisr(priv->icisr_arg, rxirq, rxack, intto);
2790     }
2791 
2792     /* An Error occurred? */
2793     if (stat & GRSPW_STAT_ERROR) {
2794         /* Wake Global WorkQ */
2795         handled = 1;
2796 
2797         if (stat & GRSPW_STS_EE)
2798             priv->stats.err_eeop++;
2799 
2800         if (stat & GRSPW_STS_IA)
2801             priv->stats.err_addr++;
2802 
2803         if (stat & GRSPW_STS_PE)
2804             priv->stats.err_parity++;
2805 
2806         if (stat & GRSPW_STS_DE)
2807             priv->stats.err_disconnect++;
2808 
2809         if (stat & GRSPW_STS_ER)
2810             priv->stats.err_escape++;
2811 
2812         if (stat & GRSPW_STS_CE)
2813             priv->stats.err_credit++;
2814 
2815         if (stat & GRSPW_STS_WE)
2816             priv->stats.err_wsync++;
2817 
2818         if (((priv->dis_link_on_err >> 16) & stat) &&
2819             (REG_READ(&priv->regs->ctrl) & GRSPW_CTRL_IE)) {
2820             /* Disable the link, no more transfers are expected
2821              * on any DMA channel.
2822              */
2823             SPIN_LOCK(&priv->devlock, irqflags);
2824             ctrl = REG_READ(&priv->regs->ctrl);
2825             REG_WRITE(&priv->regs->ctrl, GRSPW_CTRL_LD |
2826                 (ctrl & ~(GRSPW_CTRL_IE|GRSPW_CTRL_LS)));
2827             SPIN_UNLOCK(&priv->devlock, irqflags);
2828             /* Signal to work-thread to stop DMA and clean up */
2829             message = WORK_SHUTDOWN;
2830         }
2831     }
2832 
2833     /* Clear Status Flags */
2834     if (stat_clrmsk) {
2835         handled = 1;
2836         REG_WRITE(&priv->regs->status, stat_clrmsk);
2837     }
2838 
2839     /* A DMA transfer or Error occurred? In that case disable further
2840      * IRQs from the DMA channel, then invoke the workQ.
2841      *
2842      * Note that the GI interrupt flag may not be available on older
2843      * designs (it was added together with multiple DMA channel support).
2844      */
2845     SPIN_LOCK(&priv->devlock, irqflags);
2846     for (i=0; i<priv->hwsup.ndma_chans; i++) {
2847         dma_stat = REG_READ(&priv->regs->dma[i].ctrl);
2848         /* Check for errors and whether packets have been sent or
2849          * received, if the respective IRQs are enabled.
2850          */
2851         irqs = (((dma_stat << 3) & (GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS))
2852             | GRSPW_DMA_STATUS_ERROR) & dma_stat;
2853         if (!irqs)
2854             continue;
2855 
2856         handled = 1;
2857 
2858         /* DMA error has priority; if an error happens it is assumed
2859          * that the common work-queue stops the DMA operation for that
2860          * channel and makes the DMA tasks exit from their waiting
2861          * functions (both RX and TX tasks).
2862          *
2863          * Disable further IRQs (until enabled again)
2864          * from this DMA channel. Let the status
2865          * bits remain so that they can be handled by
2866          * the work function.
2867          */
2868         if (irqs & GRSPW_DMA_STATUS_ERROR) {
2869             REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat & 
2870                 ~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2871                   GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2872                   GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2873                   GRSPW_DMACTRL_AT));
2874             message |= WORK_DMA_ER(i);
2875         } else {
2876             /* determine if RX/TX interrupt source(s) shall remain
2877              * enabled.
2878              */
2879             if (priv->dma[i].cfg.flags & DMAFLAG2_IRQD_SRC) {
2880                 dma_en = ~irqs >> 3;
2881             } else {
2882                 dma_en = priv->dma[i].cfg.flags >>
2883                  (DMAFLAG2_IRQD_BIT - GRSPW_DMACTRL_TI_BIT);
2884             }
2885             dma_en &= (GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI);
2886             REG_WRITE(&priv->regs->dma[i].ctrl, dma_stat &
2887                 (~(GRSPW_DMACTRL_RI | GRSPW_DMACTRL_TI |
2888                    GRSPW_DMACTRL_PR | GRSPW_DMACTRL_PS |
2889                    GRSPW_DMACTRL_RA | GRSPW_DMACTRL_TA |
2890                    GRSPW_DMACTRL_AT) | dma_en));
2891             message |= WORK_DMA(i, irqs >> GRSPW_DMACTRL_PS_BIT);
2892         }
2893     }
2894     SPIN_UNLOCK(&priv->devlock, irqflags);
2895 
2896     if (handled != 0)
2897         priv->stats.irq_cnt++;
2898 
2899     /* Schedule work by sending message to work thread */
2900     if (message != WORK_NONE && priv->wc.msgisr) {
2901         int status;
2902         message |= WORK_CORE(priv->index);
2903         /* func interface compatible with msgQSend() on purpose, but
2904          * at the same time the user can assign a custom function to
2905          * handle DMA RX/TX operations as indicated by the "message"
2906          * and clear the handled bits before it is given to msgQSend().
2907          */
2908         status = priv->wc.msgisr(priv->wc.msgisr_arg, &message, 4);
2909         if (status != RTEMS_SUCCESSFUL) {
2910             printk("grspw_isr(%d): message fail %d (0x%x)\n",
2911                 priv->index, status, message);
2912         }
2913     }
2914 }
2915 
2916 STATIC void grspw_hw_dma_stop(struct grspw_dma_priv *dma)
2917 {
2918     unsigned int ctrl;
2919     struct grspw_dma_regs *dregs = dma->regs;
2920 
2921     ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN |
2922            GRSPW_DMACTRL_SP | GRSPW_DMACTRL_SA | GRSPW_DMACTRL_NS);
2923     ctrl |= GRSPW_DMACTRL_AT;
2924     REG_WRITE(&dregs->ctrl, ctrl);
2925 }
2926 
2927 STATIC void grspw_hw_dma_softreset(struct grspw_dma_priv *dma)
2928 {
2929     unsigned int ctrl;
2930     struct grspw_dma_regs *dregs = dma->regs;
2931 
2932     ctrl = REG_READ(&dregs->ctrl) & (GRSPW_DMACTRL_LE | GRSPW_DMACTRL_EN);
2933     REG_WRITE(&dregs->ctrl, ctrl);
2934 
2935     REG_WRITE(&dregs->rxmax, DEFAULT_RXMAX);
2936     REG_WRITE(&dregs->txdesc, 0);
2937     REG_WRITE(&dregs->rxdesc, 0);
2938 }
2939 
2940 /* Hardware Action:
2941  *  - stop DMA
2942  *  - do not bring down the link (RMAP may be active)
2943  *  - RMAP settings untouched (RMAP may be active)
2944  *  - port select untouched (RMAP may be active)
2945  *  - timecodes are disabled
2946  *  - IRQ generation disabled
2947  *  - status not cleared (let user analyze it if requested later on)
2948  *  - Node address / First DMA channels Node address
2949  *    is untouched (RMAP may be active)
2950  */
2951 STATIC void grspw_hw_stop(struct grspw_priv *priv)
2952 {
2953     int i;
2954     unsigned int ctrl;
2955     SPIN_IRQFLAGS(irqflags);
2956 
2957     SPIN_LOCK_IRQ(&priv->devlock, irqflags);
2958 
2959     for (i=0; i<priv->hwsup.ndma_chans; i++)
2960         grspw_hw_dma_stop(&priv->dma[i]);
2961 
2962     ctrl = REG_READ(&priv->regs->ctrl);
2963     REG_WRITE(&priv->regs->ctrl, ctrl & (
2964         GRSPW_CTRL_LD | GRSPW_CTRL_LS | GRSPW_CTRL_AS |
2965         GRSPW_CTRL_RE | GRSPW_CTRL_RD |
2966         GRSPW_CTRL_NP | GRSPW_CTRL_PS));
2967 
2968     SPIN_UNLOCK_IRQ(&priv->devlock, irqflags);
2969 }
2970 
2971 /* Soft reset of GRSPW core registers */
2972 STATIC void grspw_hw_softreset(struct grspw_priv *priv)
2973 {
2974     int i;
2975     unsigned int tmp;
2976 
2977     for (i=0; i<priv->hwsup.ndma_chans; i++)
2978         grspw_hw_dma_softreset(&priv->dma[i]);
2979 
2980     REG_WRITE(&priv->regs->status, 0xffffffff);
2981     REG_WRITE(&priv->regs->time, 0);
2982     /* Clear all but valuable reset values of ICCTRL */
2983     tmp = REG_READ(&priv->regs->icctrl);
2984     tmp &= GRSPW_ICCTRL_INUM | GRSPW_ICCTRL_BIRQ | GRSPW_ICCTRL_TXIRQ;
2985     tmp |= GRSPW_ICCTRL_ID;
2986     REG_WRITE(&priv->regs->icctrl, tmp);
2987     REG_WRITE(&priv->regs->icrx, 0xffffffff);
2988     REG_WRITE(&priv->regs->icack, 0xffffffff);
2989     REG_WRITE(&priv->regs->ictimeout, 0xffffffff);
2990 }
2991 
2992 int grspw_dev_count(void)
2993 {
2994     return grspw_count;
2995 }
2996 
2997 void grspw_initialize_user(void *(*devfound)(int), void (*devremove)(int,void*))
2998 {
2999     int i;
3000     struct grspw_priv *priv;
3001 
3002     /* Set new Device Found Handler */
3003     grspw_dev_add = devfound;
3004     grspw_dev_del = devremove;
3005 
3006     if (grspw_initialized == 1 && grspw_dev_add) {
3007         /* Call callback for every previously found device */
3008         for (i=0; i<grspw_count; i++) {
3009             priv = priv_tab[i];
3010             if (priv)
3011                 priv->data = grspw_dev_add(i);
3012         }
3013     }
3014 }
3015 
3016 /* Get a value at least 6.4us in number of clock cycles */
3017 static unsigned int grspw1_calc_timer64(int freq_khz)
3018 {
3019     unsigned int timer64 = (freq_khz * 64 + 9999) / 10000;
3020     return timer64 & 0xfff;
3021 }
3022 
3023 /* Get a value at least 850ns in number of clock cycles - 3 */
3024 static unsigned int grspw1_calc_discon(int freq_khz)
3025 {
3026     unsigned int discon = ((freq_khz * 85 + 99999) / 100000) - 3;
3027     return discon & 0x3ff;
3028 }
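
/* Worked example (illustrative): with a 40 MHz APB clock, freq_khz is
 * 40000, which gives:
 *
 *   timer64 = (40000 * 64 + 9999) / 10000 = 256 cycles = 6.4 us exactly
 *   discon  = (40000 * 85 + 99999) / 100000 - 3 = 34 - 3 = 31
 *
 * i.e. 34 cycles (850 ns) with the hardware's implicit +3 subtracted.
 */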
3029 
3030 /******************* Driver manager interface ***********************/
3031 
3032 /* Driver prototypes */
3033 static int grspw_common_init(void);
3034 static int grspw2_init3(struct drvmgr_dev *dev);
3035 
3036 static struct drvmgr_drv_ops grspw2_ops =
3037 {
3038     .init = {NULL,  NULL, grspw2_init3, NULL},
3039     .remove = NULL,
3040     .info = NULL
3041 };
3042 
3043 static struct amba_dev_id grspw2_ids[] =
3044 {
3045     {VENDOR_GAISLER, GAISLER_SPW}, /* not yet supported */
3046     {VENDOR_GAISLER, GAISLER_SPW2},
3047     {VENDOR_GAISLER, GAISLER_SPW2_DMA},
3048     {0, 0}      /* Mark end of table */
3049 };
3050 
3051 static struct amba_drv_info grspw2_drv_info =
3052 {
3053     {
3054         DRVMGR_OBJ_DRV,         /* Driver */
3055         NULL,               /* Next driver */
3056         NULL,               /* Device list */
3057         DRIVER_AMBAPP_GAISLER_GRSPW2_ID,/* Driver ID */
3058         "GRSPW_PKT_DRV",        /* Driver Name */
3059         DRVMGR_BUS_TYPE_AMBAPP,     /* Bus Type */
3060         &grspw2_ops,
3061         NULL,               /* Funcs */
3062         0,              /* No devices yet */
3063         sizeof(struct grspw_priv),  /* Let DrvMgr alloc priv */
3064     },
3065     &grspw2_ids[0]
3066 };
3067 
3068 void grspw2_register_drv (void)
3069 {
3070     GRSPW_DBG("Registering GRSPW2 packet driver\n");
3071     drvmgr_drv_register(&grspw2_drv_info.general);
3072 }
3073 
3074 static int grspw2_init3(struct drvmgr_dev *dev)
3075 {
3076     struct grspw_priv *priv;
3077     struct amba_dev_info *ambadev;
3078     struct ambapp_core *pnpinfo;
3079     int i;
3080     unsigned int ctrl, icctrl, numi;
3081     union drvmgr_key_value *value;
3082 
3083     GRSPW_DBG("GRSPW[%d] on bus %s\n", dev->minor_drv,
3084         dev->parent->dev->name);
3085 
3086     if (grspw_count >= GRSPW_MAX)
3087         return DRVMGR_ENORES;
3088 
3089     priv = dev->priv;
3090     if (priv == NULL)
3091         return DRVMGR_NOMEM;
3092     priv->dev = dev;
3093 
3094     /* If this is the first device, init the common part of the driver */
3095     if (grspw_common_init())
3096         return DRVMGR_FAIL;
3097 
3098     /*** Now we take care of device initialization ***/
3099 
3100     /* Get device information from AMBA PnP information */
3101     ambadev = (struct amba_dev_info *)dev->businfo;
3102     if (ambadev == NULL)
3103         return -1;
3104     pnpinfo = &ambadev->info;
3105     priv->irq = pnpinfo->irq;
3106     priv->regs = (struct grspw_regs *)pnpinfo->apb_slv->start;
3107 
3108     /* Read Hardware Support from Control Register */
3109     ctrl = REG_READ(&priv->regs->ctrl);
3110     priv->hwsup.rmap = (ctrl & GRSPW_CTRL_RA) >> GRSPW_CTRL_RA_BIT;
3111     priv->hwsup.rmap_crc = (ctrl & GRSPW_CTRL_RC) >> GRSPW_CTRL_RC_BIT;
3112     priv->hwsup.ccsds_crc = (ctrl & GRSPW_CTRL_CC) >> GRSPW_CTRL_CC_BIT;
3113     priv->hwsup.rx_unalign = (ctrl & GRSPW_CTRL_RX) >> GRSPW_CTRL_RX_BIT;
3114     priv->hwsup.nports = 1 + ((ctrl & GRSPW_CTRL_PO) >> GRSPW_CTRL_PO_BIT);
3115     priv->hwsup.ndma_chans = 1 + ((ctrl & GRSPW_CTRL_NCH) >> GRSPW_CTRL_NCH_BIT);
3116     priv->hwsup.irq = ((ctrl & GRSPW_CTRL_ID) >> GRSPW_CTRL_ID_BIT);
3117     icctrl = REG_READ(&priv->regs->icctrl);
3118     numi = (icctrl & GRSPW_ICCTRL_NUMI) >> GRSPW_ICCTRL_NUMI_BIT;
3119     if (numi > 0)
3120         priv->hwsup.irq_num = 1 << (numi - 1);
3121     else 
3122         priv->hwsup.irq_num = 0;
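    /*
     * Example (follows from the expression above): NUMI is a log2-style
     * field, so numi == 3 yields irq_num = 1 << 2 = 4 SpaceWire
     * interrupt numbers, while numi == 0 means no interrupt support.
     */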
3123 
3124     /* Construct hardware version identification */
3125     priv->hwsup.hw_version = pnpinfo->device << 16 | pnpinfo->apb_slv->common.ver;
3126 
3127     if ((pnpinfo->device == GAISLER_SPW2) ||
3128         (pnpinfo->device == GAISLER_SPW2_DMA)) {
3129         priv->hwsup.strip_adr = 1; /* All GRSPW2 can strip Address */
3130         priv->hwsup.strip_pid = 1; /* All GRSPW2 can strip PID */
3131     } else {
3132         unsigned int apb_hz, apb_khz;
3133 
3134         /* GRSPW1 features cannot be autodetected; assume no stripping support */
3135         priv->hwsup.strip_adr = 0;
3136         priv->hwsup.strip_pid = 0;
3137 
3138         drvmgr_freq_get(dev, DEV_APB_SLV, &apb_hz);
3139         apb_khz = apb_hz / 1000;
3140 
3141         REG_WRITE(&priv->regs->timer,
3142             ((grspw1_calc_discon(apb_khz) & 0x3FF) << 12) |
3143             (grspw1_calc_timer64(apb_khz) & 0xFFF));
3144     }
3145 
3146     /* Probe width of SpaceWire Interrupt ISR timers. All timers have
3147      * the same width, so only the first is probed; if no timer is
3148      * present the result will be zero.
3149      */
3150     REG_WRITE(&priv->regs->icrlpresc, 0x7fffffff);
3151     ctrl = REG_READ(&priv->regs->icrlpresc);
3152     REG_WRITE(&priv->regs->icrlpresc, 0);
3153     priv->hwsup.itmr_width = 0;
3154     while (ctrl & 1) {
3155         priv->hwsup.itmr_width++;
3156         ctrl = ctrl >> 1;
3157     }
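    /*
     * Example (illustrative): if writing 0x7fffffff reads back as
     * 0x00000fff, only the low 12 bits are implemented and the loop
     * above counts itmr_width up to 12.
     */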
3158 
3159     /* Let the user limit the number of DMA channels on this core to
3160      * save memory. Only the first nDMA channels will be available.
3161      */
3162     value = drvmgr_dev_key_get(priv->dev, "nDMA", DRVMGR_KT_INT);
3163     if (value && (value->i < priv->hwsup.ndma_chans))
3164         priv->hwsup.ndma_chans = value->i;
3165 
3166     /* Allocate and init Memory for all DMA channels */
3167     priv->dma = grlib_calloc(priv->hwsup.ndma_chans, sizeof(*priv->dma));
3168     if (priv->dma == NULL)
3169         return DRVMGR_NOMEM;
3170     for (i=0; i<priv->hwsup.ndma_chans; i++) {
3171         priv->dma[i].core = priv;
3172         priv->dma[i].index = i;
3173         priv->dma[i].regs = &priv->regs->dma[i];
3174     }
3175 
3176     /* Startup Action:
3177      *  - stop DMA
3178      *  - do not bring down the link (RMAP may be active)
3179      *  - RMAP settings untouched (RMAP may be active)
3180      *  - port select untouched (RMAP may be active)
3181      *  - timecodes are disabled
3182      *  - IRQ generation disabled
3183      *  - status cleared
3184      *  - Node address / First DMA channels Node address
3185      *    is untouched (RMAP may be active)
3186      */
3187     grspw_hw_stop(priv);
3188     grspw_hw_softreset(priv);
3189 
3190     /* Register the device in the driver's device table */
3191     priv->index = grspw_count;
3192     priv_tab[priv->index] = priv;
3193     grspw_count++;
3194 
3195     /* Device name */
3196     sprintf(priv->devname, "grspw%d", priv->index);
3197 
3198     /* Tell the layer above about the new device */
3199     if (grspw_dev_add)
3200         priv->data = grspw_dev_add(priv->index);
3201 
3202     return DRVMGR_OK;
3203 }
3204 
3205 /******************* Driver Implementation ***********************/
3206 /* Creates a message queue (optional) and spawns a worker task associated
3207  * with it. The task can also be associated with a custom message queue
3208  * if *pMsgQ is non-zero.
3209  */
3210 rtems_id grspw_work_spawn(int prio, int stack, rtems_id *pMsgQ, int msgMax)
3211 {
3212     rtems_id tid;
3213     int created_msgq = 0;
3214     static char work_name = 'A';
3215 
3216     if (pMsgQ == NULL)
3217         return OBJECTS_ID_NONE;
3218 
3219     if (*pMsgQ == OBJECTS_ID_NONE) {
3220         if (msgMax <= 0)
3221             msgMax = 32;
3222 
3223         if (rtems_message_queue_create(
3224             rtems_build_name('S', 'G', 'Q', work_name),
3225             msgMax, 4, RTEMS_FIFO, pMsgQ) !=
3226             RTEMS_SUCCESSFUL)
3227             return OBJECTS_ID_NONE;
3228         created_msgq = 1;
3229     }
3230 
3231     if (prio < 0)
3232         prio = grspw_work_task_priority; /* default prio */
3233     if (stack < 0x800)
3234         stack = RTEMS_MINIMUM_STACK_SIZE; /* default stack size */
3235 
3236     if (rtems_task_create(rtems_build_name('S', 'G', 'T', work_name),
3237         prio, stack, RTEMS_PREEMPT | RTEMS_NO_ASR,
3238         RTEMS_NO_FLOATING_POINT, &tid) != RTEMS_SUCCESSFUL)
3239         tid = OBJECTS_ID_NONE;
3240     else if (rtems_task_start(tid, (rtems_task_entry)grspw_work_func, *pMsgQ) !=
3241             RTEMS_SUCCESSFUL) {
3242         rtems_task_delete(tid);
3243         tid = OBJECTS_ID_NONE;
3244     }
3245 
3246     if (tid == OBJECTS_ID_NONE && created_msgq) {
3247         rtems_message_queue_delete(*pMsgQ);
3248         *pMsgQ = OBJECTS_ID_NONE;
3249     } else {
3250         if (++work_name > 'Z')
3251             work_name = 'A';
3252     }
3253     return tid;
3254 }
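/*
 * Usage sketch (assumption - the identifiers below are made up for the
 * example): spawn a dedicated worker with priority 10, a 4 KiB stack
 * and a new 16-entry message queue:
 *
 *   rtems_id wq = OBJECTS_ID_NONE;
 *   rtems_id wtid = grspw_work_spawn(10, 0x1000, &wq, 16);
 *
 * If wtid == OBJECTS_ID_NONE the call failed and neither the task nor
 * the queue exists afterwards.
 */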
3255 
3256 /* Free the task associated with the message queue and optionally also the
3257  * message queue itself. The message queue is deleted by the work task and
3258  * deletion is therefore delayed until the work task resumes execution.
3259  */
3260 rtems_status_code grspw_work_free(rtems_id msgQ, int freeMsgQ)
3261 {
3262     int msg = WORK_QUIT_TASK;
3263     if (freeMsgQ)
3264         msg |= WORK_FREE_MSGQ;
3265     return rtems_message_queue_send(msgQ, &msg, 4);
3266 }
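/*
 * Usage sketch (continuing the example above): ask the worker to quit
 * and have it delete its own message queue once the request has been
 * processed:
 *
 *   grspw_work_free(wq, 1);
 */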
3267 
3268 void grspw_work_cfg(void *d, struct grspw_work_config *wc)
3269 {
3270     struct grspw_priv *priv = (struct grspw_priv *)d;
3271 
3272     if (wc == NULL)
3273         wc = &grspw_wc_def; /* use default config */
3274     priv->wc = *wc;
3275 }
3276 
3277 #ifdef RTEMS_SMP
3278 int grspw_isr_affinity(void *d, const cpu_set_t *cpus)
3279 {
3280     return -1; /* BSP supports only statically configured IRQ affinity */
3281 }
3282 #endif
3283 
3284 static int grspw_common_init(void)
3285 {
3286     if (grspw_initialized == 1)
3287         return 0;
3288     if (grspw_initialized == -1)
3289         return -1;
3290     grspw_initialized = -1;
3291 
3292     /* Device Semaphore created with count = 1 */
3293     if (rtems_semaphore_create(rtems_build_name('S', 'G', 'L', 'S'), 1,
3294         RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
3295         RTEMS_NO_INHERIT_PRIORITY | RTEMS_LOCAL | \
3296         RTEMS_NO_PRIORITY_CEILING, 0, &grspw_sem) != RTEMS_SUCCESSFUL)
3297         return -1;
3298 
3299     /* Work queue and work task. Not created if the user disables it;
3300      * the user can disable it to save resources when interrupts are
3301      * not used.
3302      */
3302     if (grspw_work_task_priority != -1) {
3303         grspw_work_task = grspw_work_spawn(-1, 0,
3304             (rtems_id *)&grspw_wc_def.msgisr_arg, 0);
3305         if (grspw_work_task == OBJECTS_ID_NONE)
3306             return -2;
3307         grspw_wc_def.msgisr =
3308             (grspw_msgqisr_t) rtems_message_queue_send;
3309     } else {
3310         grspw_wc_def.msgisr = NULL;
3311         grspw_wc_def.msgisr_arg = NULL;
3312     }
3313 
3314     grspw_initialized = 1;
3315     return 0;
3316 }