/* drivers/net/wireless/bcm43xx/bcm43xx_dma.c */
/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(ring->tx);
        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX,
         * if we are running low on free slots.
         */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(ring->tx);

        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again.
         */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}
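
/*
 * Illustrative sketch (not part of the driver): request_slot() and
 * return_slot() above implement hysteresis-based flow control for the
 * TX queue.  The queue is stopped once the free slot count drops below
 * suspend_mark and is only woken again once it climbs back above
 * resume_mark (suspend_mark < resume_mark), so freeing a single slot
 * near the threshold cannot toggle the queue on and off rapidly.
 * The hypothetical helper below merely restates that rule.
 */
#if 0 /* example only, never compiled */
static int example_queue_should_run(int free_slots_now, int queue_stopped,
                                    int suspend_mark, int resume_mark)
{
        if (queue_stopped)
                return free_slots_now >= resume_mark;   /* wake late */
        return free_slots_now >= suspend_mark;          /* stop early */
}
#endif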

u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
        static const u16 map64[] = {
                BCM43xx_MMIO_DMA64_BASE0,
                BCM43xx_MMIO_DMA64_BASE1,
                BCM43xx_MMIO_DMA64_BASE2,
                BCM43xx_MMIO_DMA64_BASE3,
                BCM43xx_MMIO_DMA64_BASE4,
                BCM43xx_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                BCM43xx_MMIO_DMA32_BASE0,
                BCM43xx_MMIO_DMA32_BASE1,
                BCM43xx_MMIO_DMA32_BASE2,
                BCM43xx_MMIO_DMA32_BASE3,
                BCM43xx_MMIO_DMA32_BASE4,
                BCM43xx_MMIO_DMA32_BASE5,
        };

        if (dma64bit) {
                assert(controller_idx >= 0 &&
                       controller_idx < ARRAY_SIZE(map64));
                return map64[controller_idx];
        }
        assert(controller_idx >= 0 &&
               controller_idx < ARRAY_SIZE(map32));
        return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;
        int direction = PCI_DMA_FROMDEVICE;

        if (tx)
                direction = PCI_DMA_TODEVICE;

        dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                         buf, len,
                                         direction);

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx) {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_TODEVICE);
        } else {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_FROMDEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
                                    addr, len, PCI_DMA_FROMDEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_device(ring->bcm->pci_dev,
                                       addr, len, PCI_DMA_FROMDEVICE);
}
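
/*
 * Illustrative sketch (not part of the driver): the map/sync/unmap
 * wrappers above follow the usual streaming-DMA ownership rules for an
 * RX descriptor buffer.  The buffer is mapped once, owned by the device
 * while it may DMA into it, synced to the CPU before the CPU reads it,
 * and then either synced back to the device (to recycle the buffer) or
 * unmapped (when the skb is handed up the stack):
 *
 *      dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
 *      ... device DMAs a received frame into the buffer ...
 *      sync_descbuffer_for_cpu(ring, dmaaddr, ring->rx_buffersize);
 *      ... CPU inspects the RX header ...
 *      sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
 *      ... or, when the skb is passed up the stack instead:
 *      unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
 *
 * dma_rx() below follows exactly this sequence.
 */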

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
{
        assert(meta->skb);
        if (irq_context)
                dev_kfree_skb_irq(meta->skb);
        else
                dev_kfree_skb(meta->skb);
        meta->skb = NULL;
}

static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
                                            &(ring->dmabase));
        if (!ring->descbase) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
                                             BCM43xx_DMA_RINGMEMSIZE,
                                             PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(rx_ring_dma) ||
                    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                        /* Sigh... */
                        if (!pci_dma_mapping_error(rx_ring_dma))
                                pci_unmap_single(ring->bcm->pci_dev,
                                                 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                 PCI_DMA_BIDIRECTIONAL);
                        rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
                                                 rx_ring, BCM43xx_DMA_RINGMEMSIZE,
                                                 PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(rx_ring_dma) ||
                            rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                                assert(0);
                                if (!pci_dma_mapping_error(rx_ring_dma))
                                        pci_unmap_single(ring->bcm->pci_dev,
                                                         rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                                goto out_err;
                        }
                }

                ring->descbase = rx_ring;
                ring->dmabase = rx_ring_dma;
        }
        memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;
out_err:
        printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
        return -ENOMEM;
}
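
/*
 * Illustrative sketch (not part of the driver): alloc_ringmemory() above,
 * setup_rx_descbuffer() and dma_tx_fragment() below all repeat the same
 * test before trusting a streaming mapping: the map must have succeeded
 * and the whole buffer must lie below the device's DMA mask (some chips
 * cannot DMA above 1GB).  A hypothetical helper expressing that test:
 */
#if 0 /* example only, never compiled */
static int example_dma_mapping_ok(struct bcm43xx_dmaring *ring,
                                  dma_addr_t addr, size_t len)
{
        if (pci_dma_mapping_error(addr))
                return 0;
        /* The entire buffer must be addressable by the device. */
        return (addr + len <= ring->bcm->dma_mask);
}
#endif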

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_RXSTAT;
                        if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_RXSTATE;
                        if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA32_TXSTAT_STOPPED)
                                break;
                }
                udelay(10);
        }
        offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}

static void fill_descriptor(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_generic *desc,
                            dma_addr_t dmaaddr,
                            u16 bufsize,
                            int start, int end, int irq)
{
        int slot;

        slot = bcm43xx_dma_desc2idx(ring, desc);
        assert(slot >= 0 && slot < ring->nr_slots);

        if (ring->dma64) {
                u32 ctl0 = 0, ctl1 = 0;
                u32 addrlo, addrhi;
                u32 addrext;

                addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
                addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
                addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                addrhi |= ring->routing;
                if (slot == ring->nr_slots - 1)
                        ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
                if (start)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
                if (end)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
                if (irq)
                        ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
                ctl1 |= (bufsize - ring->frameoffset)
                        & BCM43xx_DMA64_DCTL1_BYTECNT;
                ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
                        & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

                desc->dma64.control0 = cpu_to_le32(ctl0);
                desc->dma64.control1 = cpu_to_le32(ctl1);
                desc->dma64.address_low = cpu_to_le32(addrlo);
                desc->dma64.address_high = cpu_to_le32(addrhi);
        } else {
                u32 ctl;
                u32 addr;
                u32 addrext;

                addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
                addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
                           >> BCM43xx_DMA32_ROUTING_SHIFT;
                addr |= ring->routing;
                ctl = (bufsize - ring->frameoffset)
                      & BCM43xx_DMA32_DCTL_BYTECNT;
                if (slot == ring->nr_slots - 1)
                        ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
                if (start)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
                if (end)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
                if (irq)
                        ctl |= BCM43xx_DMA32_DCTL_IRQ;
                ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
                       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

                desc->dma32.control = cpu_to_le32(ctl);
                desc->dma32.address = cpu_to_le32(addr);
        }
}
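
/*
 * Worked example (hypothetical address, for illustration only): with a
 * 64-bit descriptor and a mapped buffer address of 0x123456789C,
 * fill_descriptor() above splits the address as
 *
 *      addrlo  = 0x3456789C                      (low 32 bits)
 *      addrhi  = 0x12 & ~BCM43xx_DMA64_ROUTING   (high word, routing bits cleared)
 *      addrext = 0x12 >> BCM43xx_DMA64_ROUTING_SHIFT
 *
 * and then ORs ring->routing into addrhi so the backplane routes the
 * transfer correctly.  The 32-bit branch performs the same split on a
 * 32-bit address, with the routing field in the top bits of the single
 * address word; in both cases addrext is folded into the descriptor
 * control word.
 */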

static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc_generic *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        struct bcm43xx_hwxmitstatus *xmitstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        assert(!ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        /* This hardware bug work-around adapted from the b44 driver.
           The chip may be unable to do PCI DMA to/from anything above 1GB */
        if (pci_dma_mapping_error(dmaaddr) ||
            dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                /* This one has 30-bit addressing... */
                if (!pci_dma_mapping_error(dmaaddr))
                        pci_unmap_single(ring->bcm->pci_dev,
                                         dmaaddr, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                         skb->data, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(dmaaddr) ||
                    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                        assert(0);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;

        fill_descriptor(ring, desc, dmaaddr,
                        ring->rx_buffersize, 0, 0, 0);

        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;
        xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
        xmitstat->cookie = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;

        if (ring->tx) {
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = BCM43xx_DMA64_TXENABLE;
                        value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
                                & BCM43xx_DMA64_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
                                        (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
                                        ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                        | ring->routing);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = BCM43xx_DMA32_TXENABLE;
                        value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
                                & BCM43xx_DMA32_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
                                        (ringbase & ~BCM43xx_DMA32_ROUTING)
                                        | ring->routing);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA64_RXENABLE;
                        value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
                                & BCM43xx_DMA64_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
                                        (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
                                        ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                        | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA32_RXENABLE;
                        value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
                                & BCM43xx_DMA32_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
                                        (ringbase & ~BCM43xx_DMA32_ROUTING)
                                        | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
        } else {
                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        assert(ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                        meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                        ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta, 0);
        }
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               int controller_index,
                                               int for_tx,
                                               int dma64)
{
        struct bcm43xx_dmaring *ring;
        int err;
        int nr_slots;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        nr_slots = BCM43xx_RXRING_SLOTS;
        if (for_tx)
                nr_slots = BCM43xx_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->routing = BCM43xx_DMA32_CLIENTTRANS;
        if (dma64)
                ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX
        if (bcm->pci_dev->bus->number == 0)
                ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif

        ring->bcm = bcm;
        ring->nr_slots = nr_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
        ring->index = controller_index;
        ring->dma64 = !!dma64;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
                } else
                        assert(0);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;
        return ring;

out:
        printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
                (ring->dma64) ? "64" : "32",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so no need to take care of concurrency with rx handler stuff.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma;

        if (bcm43xx_using_pio(bcm))
                return;
        dma = bcm43xx_current_dma(bcm);

        bcm43xx_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;

        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;
        int dma64 = 0;

        bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
        if (bcm->dma_mask == DMA_64BIT_MASK)
                dma64 = 1;
        err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;
        err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;

        /* setup TX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
                (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
                (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx5:
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
err_destroy_tx4:
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
        printk(KERN_WARNING PFX "DMA not supported on this device."
                                " Falling back to PIO.\n");
        bcm->__using_pio = 1;
        return -ENOSYS;
#else
        printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
                            "Please recompile the driver with PIO support.\n");
        return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}
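
/*
 * Illustrative example (made-up numbers): generate_cookie() and
 * parse_cookie() form an encode/decode pair.  The top nibble selects the
 * TX ring (0xA000..0xF000 for tx_ring0..tx_ring5) and the low 12 bits
 * carry the slot number.  A frame queued in slot 42 of tx_ring1 therefore
 * gets cookie 0xB02A, and parse_cookie(bcm, 0xB02A, &slot) returns
 * tx_ring1 with slot == 42 when the corresponding xmit status arrives.
 */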

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
        u16 offset;
        int descsize;

        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Close your seat belts now, please.
         */
        wmb();
        slot = next_slot(ring, slot);
        offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
        descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
                : sizeof(struct bcm43xx_dmadesc32);
        bcm43xx_dma_write(ring, offset,
                        (u32)(slot * descsize));
}
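
/*
 * Worked example (hypothetical slot number): the TX index register takes
 * a byte offset into the descriptor table, pointing one descriptor past
 * the last one of the frame.  If the frame ends in slot 5 of a 64-bit
 * ring, dmacontroller_poke_tx() above writes
 * (5 + 1) * sizeof(struct bcm43xx_dmadesc64); on a 32-bit ring the same
 * slot yields (5 + 1) * sizeof(struct bcm43xx_dmadesc32).  The hardware
 * then transmits descriptors up to, but not including, that offset.
 */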

static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        dma_addr_t dmaaddr;
        struct sk_buff *bounce_skb;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header.
         */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));
        dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                /* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
                if (!dma_mapping_error(dmaaddr))
                        unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        return;
                /* Copy the frame into the bounce buffer before mapping it,
                 * so the device sees the real frame data and the full
                 * frame length gets mapped. */
                memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
                dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
                if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                        if (!dma_mapping_error(dmaaddr))
                                unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                        dev_kfree_skb_any(bounce_skb);
                        assert(0);
                        return;
                }
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;

        fill_descriptor(ring, desc, dmaaddr,
                        skb->len, 1, 1, 1);

        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory. Poke
         * the device to send the stuff.
         * Note that this is called from atomic context.
         */
        struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
        u8 i;
        struct sk_buff *skb;

        assert(ring->tx);
        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped,
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark.
                 */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* Take skb from ieee80211_txb_free */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
        }
        ieee80211_txb_free(txb);

        return 0;
}

void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;
        u32 tmp;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

                if (ring->dma64) {
                        tmp = le32_to_cpu(desc->dma64.control0);
                        is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
                } else {
                        tmp = le32_to_cpu(desc->dma32.control);
                        is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
                }
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;
}
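
/*
 * Note on the loop above: it walks slots from the one named by the cookie
 * until it sees a descriptor with the FRAMEEND bit set, so it can cope
 * with frames that span several descriptors.  Since dma_tx_fragment()
 * above marks every descriptor with both FRAMESTART and FRAMEEND, in
 * practice each xmit status frees exactly one slot here.
 */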

static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;
                int i = 0;

                stat.cookie = le16_to_cpu(hw->cookie);
                while (stat.cookie == 0) {
                        if (unlikely(++i >= 10000)) {
                                assert(0);
                                break;
                        }
                        udelay(2);
                        barrier();
                        stat.cookie = le16_to_cpu(hw->cookie);
                }
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
                        "(len: %u, buffer: %u, nr-dropped: %d)\n",
                        len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err) {
                dev_kfree_skb_irq(skb);
                goto drop;
        }

drop:
        return;
}
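
/*
 * Worked example (hypothetical sizes): the device writes each frame at
 * ring->frameoffset bytes into the buffer, behind its RX header.  With a
 * frameoffset of 30 and a received frame of 100 bytes, dma_rx() above
 * first does skb_put(skb, 100 + 30) to cover everything the device wrote
 * and then skb_pull(skb, 30) so that skb->data points at the 802.11 frame
 * itself before handing it to bcm43xx_rx().
 */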

void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        assert(!ring->tx);
        if (ring->dma64) {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
                descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
        } else {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
                descptr = (status & BCM43xx_DMA32_RXDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
        }
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
                                (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
                                (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
        }
        ring->current_slot = slot;
}
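
/*
 * Worked example (hypothetical register value): the RX status register
 * reports hardware progress as a byte offset into the descriptor table.
 * If the 32-bit engine reports a descptr of
 * 3 * sizeof(struct bcm43xx_dmadesc32), the device has filled slots up to
 * (but not including) slot 3.  bcm43xx_dma_rx() above therefore walks its
 * software current_slot up to slot 3, handling one received frame per
 * slot, and finally writes the new byte offset back to the RX index
 * register so the drained slots may be reused by the hardware.
 */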

void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                | BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                | BCM43xx_DMA32_TXSUSPEND);
        }
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                & ~BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                & ~BCM43xx_DMA32_TXSUSPEND);
        }
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}