/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.0"
#define DRV_RELDATE	"June 23, 2006"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
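	/* Give every RX descriptor its fixed DMA buffer and mark it
	 * hardware-owned; the TX descriptors stay zeroed and are filled
	 * in one at a time by qe_start_xmit().
	 */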
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter frame space nor throttle seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (tries--) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries < 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

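	/* The following status bits report that one of the chip's internal
	 * event counters has wrapped; each counter is presumably 8 bits
	 * wide, so a full 256 events are credited per overflow.
	 */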
	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Unlike the happy meal,
 * packets arrive in fixed DMA buffers and every frame is copied into
 * a freshly allocated skb before being passed up the stack.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

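	/* Walk the ring until we hit a descriptor the hardware still owns.
	 * Each completed frame is copied out of its fixed buffer, then the
	 * descriptor is re-armed with that same buffer address.
	 */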
	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->dev->last_rx = jiffies;
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
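	/* GLOB_STAT packs four status bits per channel, channel 0 in the
	 * lowest nibble; the latched value is shifted right by four after
	 * each channel is examined.
	 */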
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

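	/* Advance tx_old past every descriptor the hardware has released.
	 * There are no skbs to free here: the data was copied into the
	 * fixed transmit buffers by qe_start_xmit(), which already freed
	 * the original skb.
	 */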
	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return 0;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	u8 new_mconfig = qep->mconfig;
	char *addrs;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

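		/* The MACE's logical address filter is a 64-bit hash table:
		 * the upper 6 bits of the little-endian CRC of each multicast
		 * address select one of the 64 filter bits, and the resulting
		 * 8 bytes are loaded into the chip a byte at a time below.
		 */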
		for (i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct sunqe *qep = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	sprintf(info->bus_info, "SBUS:%d",
		qep->qe_sdev->slot);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = dev->priv;
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return (phyconfig & MREGS_PHYCONFIG_LSTAT);
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo = qe_get_drvinfo,
	.get_link = qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

static u8 __devinit qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev)
{
	struct sbus_dev *qec_sdev = child_sdev->parent;
	struct sunqec *qecp;

	for (qecp = root_qec_dev; qecp; qecp = qecp->next_module) {
		if (qecp->qec_sdev == qec_sdev)
			break;
	}
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->qec_sdev = qec_sdev;
			qecp->gregs = sbus_ioremap(&qec_sdev->resource[0], 0,
						   GLOB_REG_SIZE,
						   "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(qec_sdev->ofdev.node);

			qec_init_once(qecp, qec_sdev);

			if (request_irq(qec_sdev->irqs[0], &qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static int __devinit qec_ether_init(struct sbus_dev *sdev)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqe *qe;
	struct sunqec *qecp;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);

	qe = netdev_priv(dev);

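	/* Prefer the OBP "channel#" property; if it is absent, derive the
	 * channel index from this device's position among its siblings
	 * under the QEC node.
	 */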
	i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
	if (i == -1) {
		struct sbus_dev *td = sdev->parent->child;
		i = 0;
		while (td != sdev) {
			td = td->next;
			i++;
		}
	}
	qe->channel = i;
	spin_lock_init(&qe->lock);

	res = -ENODEV;
	qecp = get_qec(sdev);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->qe_sdev = sdev;

	res = -ENOMEM;
	qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
				  CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
				 MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
					     PAGE_SIZE,
					     &qe->qblock_dvma);
	qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
					    sizeof(struct sunqe_buffers),
					    &qe->buffers_dvma);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &sdev->ofdev.dev);

	dev->open = qe_open;
	dev->stop = qe_close;
	dev->hard_start_xmit = qe_start_xmit;
	dev->set_multicast_list = qe_set_multicast;
	dev->tx_timeout = qe_tx_timeout;
	dev->watchdog_timeo = 5*HZ;
	dev->irq = sdev->irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	dev_set_drvdata(&sdev->ofdev.dev, qe);

	printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c",
		       dev->dev_addr[i],
		       i == 5 ? ' ' : ':');
	printk("\n");

	return 0;

fail:
	if (qe->qcregs)
		sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		sbus_free_consistent(qe->qe_sdev,
				     PAGE_SIZE,
				     qe->qe_block,
				     qe->qblock_dvma);
	if (qe->buffers)
		sbus_free_consistent(qe->qe_sdev,
				     sizeof(struct sunqe_buffers),
				     qe->buffers,
				     qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int __devinit qec_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);

	return qec_ether_init(sdev);
}

static int __devexit qec_sbus_remove(struct of_device *dev)
{
	struct sunqe *qp = dev_get_drvdata(&dev->dev);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
	sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
	sbus_free_consistent(qp->qe_sdev,
			     PAGE_SIZE,
			     qp->qe_block,
			     qp->qblock_dvma);
	sbus_free_consistent(qp->qe_sdev,
			     sizeof(struct sunqe_buffers),
			     qp->buffers,
			     qp->buffers_dvma);

	free_netdev(net_dev);

	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct of_platform_driver qec_sbus_driver = {
	.name = "qec",
	.match_table = qec_sbus_match,
	.probe = qec_sbus_probe,
	.remove = __devexit_p(qec_sbus_remove),
};

static int __init qec_init(void)
{
	return of_register_driver(&qec_sbus_driver, &sbus_bus_type);
}

static void __exit qec_exit(void)
{
	of_unregister_driver(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;

		free_irq(root_qec_dev->qec_sdev->irqs[0],
			 (void *) root_qec_dev);
		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);

		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);