/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

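/* TX rings use a simple watermark scheme for flow control:
 * request_slot() stops the network queue once the number of free
 * slots drops below suspend_mark, and return_slot() wakes the queue
 * again once at least resume_mark slots are free.
 */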
/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(ring->tx);
        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX
         * if we are running low on free slots.
         */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(ring->tx);

        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again.
         */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}

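/* Map/unmap a descriptor buffer for streaming DMA.  The tx flag only
 * selects the DMA direction; RX buffers additionally use the
 * sync_descbuffer_for_cpu/_for_device helpers below, because they may
 * be recycled and handed back to the device without being remapped.
 */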
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;

        if (tx) {
                dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
                                         buf, len,
                                         DMA_TO_DEVICE);
        } else {
                dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
                                         buf, len,
                                         DMA_FROM_DEVICE);
        }

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx) {
                dma_unmap_single(&ring->bcm->pci_dev->dev,
                                 addr, len,
                                 DMA_TO_DEVICE);
        } else {
                dma_unmap_single(&ring->bcm->pci_dev->dev,
                                 addr, len,
                                 DMA_FROM_DEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        assert(!ring->tx);

        dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
                                addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        assert(!ring->tx);

        dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
                                   addr, len, DMA_FROM_DEVICE);
}

/* Free a descriptor buffer. The caller must have unmapped it already. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc *desc,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
{
        assert(meta->skb);
        if (irq_context)
                dev_kfree_skb_irq(meta->skb);
        else
                dev_kfree_skb(meta->skb);
        meta->skb = NULL;
}

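/* The descriptor ring itself lives in a single coherent DMA block of
 * BCM43xx_DMA_RINGMEMSIZE bytes.  The device apparently requires the
 * ring (and all descriptor buffers) to lie below the 1G bus-address
 * limit (BCM43xx_DMA_BUSADDRMAX) and the ring base to be 1K aligned,
 * which alloc_ringmemory() checks below.
 */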
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                                         &(ring->dmabase), GFP_KERNEL);
        if (!ring->vbase) {
                printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
                return -ENOMEM;
        }
        if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
                printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RINGMEMORY >1G "
                                    "(0x%08x, len: %lu)\n",
                       ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
                dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                                  ring->vbase, ring->dmabase);
                return -ENOMEM;
        }
        assert(!(ring->dmabase & 0x000003FF));
        memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->vbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base)
{
        int i;
        u32 value;

        bcm43xx_write32(bcm,
                        mmio_base + BCM43xx_DMA_RX_CONTROL,
                        0x00000000);
        for (i = 0; i < 1000; i++) {
                value = bcm43xx_read32(bcm,
                                       mmio_base + BCM43xx_DMA_RX_STATUS);
                value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
                if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
                        i = -1;
                        break;
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base)
{
        int i;
        u32 value;

        for (i = 0; i < 1000; i++) {
                value = bcm43xx_read32(bcm,
                                       mmio_base + BCM43xx_DMA_TX_STATUS);
                value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
                if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
                    value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
                    value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
                        break;
                udelay(10);
        }
        bcm43xx_write32(bcm,
                        mmio_base + BCM43xx_DMA_TX_CONTROL,
                        0x00000000);
        for (i = 0; i < 1000; i++) {
                value = bcm43xx_read32(bcm,
                                       mmio_base + BCM43xx_DMA_TX_STATUS);
                value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
                if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
                        i = -1;
                        break;
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}

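/* Allocate and map a receive buffer for one RX slot and point the
 * hardware descriptor at it.  Also used from dma_rx() to replace a
 * buffer that has just been handed up the stack.
 */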
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        dma_addr_t dmaaddr;
        u32 desc_addr;
        u32 desc_ctl;
        const int slot = (int)(desc - ring->vbase);
        struct sk_buff *skb;

        assert(slot >= 0 && slot < ring->nr_slots);
        assert(!ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
                unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb_any(skb);
                printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RX SKB >1G "
                                    "(0x%08x, len: %u)\n",
                       dmaaddr, ring->rx_buffersize);
                return -ENOMEM;
        }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;
        desc_addr = (u32)(dmaaddr + ring->memoffset);
        desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
                    (u32)(ring->rx_buffersize - ring->frameoffset));
        if (slot == ring->nr_slots - 1)
                desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
        set_desc_addr(desc, desc_addr);
        set_desc_ctl(desc, desc_ctl);

        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->vbase + i;
                meta = ring->meta + i;

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = ring->vbase + i;
                meta = ring->meta + i;

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;

        if (ring->tx) {
                /* Set Transmit Control register to "transmit enable" */
                bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
                                  BCM43xx_DMA_TXCTRL_ENABLE);
                /* Set Transmit Descriptor ring address. */
                bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING,
                                  ring->dmabase + ring->memoffset);
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                /* Set Receive Control "receive enable" and frame offset */
                value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
                value |= BCM43xx_DMA_RXCTRL_ENABLE;
                bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value);
                /* Set Receive Descriptor ring address. */
                bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING,
                                  ring->dmabase + ring->memoffset);
                /* Init the descriptor pointer. */
                bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200);
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
                /* Zero out Transmit Descriptor ring address. */
                bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0);
        } else {
                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
                /* Zero out Receive Descriptor ring address. */
                bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0);
        }
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = ring->vbase + i;
                meta = ring->meta + i;

                if (!meta->skb) {
                        assert(ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, desc, meta, 0);
        }
}

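/* Allocate and initialize one DMA ring: the ring structure, its meta
 * array, the TX flow control watermarks, the RX buffer geometry for
 * the given controller, the coherent ring memory and finally the
 * hardware setup via dmacontroller_setup().
 */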
/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               u16 dma_controller_base,
                                               int nr_descriptor_slots,
                                               int tx)
{
        struct bcm43xx_dmaring *ring;
        int err;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
#ifdef CONFIG_BCM947XX
        if (bcm->pci_dev->bus->number == 0)
                ring->memoffset = 0;
#endif

        ring->bcm = bcm;
        ring->nr_slots = nr_descriptor_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = dma_controller_base;
        if (tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                switch (dma_controller_base) {
                case BCM43xx_MMIO_DMA1_BASE:
                        ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
                        break;
                case BCM43xx_MMIO_DMA4_BASE:
                        ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
                        break;
                default:
                        assert(0);
                }
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

out:
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so there is no need to take care of concurrency with the
         * RX handler.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma;

        if (bcm43xx_using_pio(bcm))
                return;
        dma = bcm43xx_current_dma(bcm);

        bcm43xx_destroy_dmaring(dma->rx_ring1);
        dma->rx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

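/* Set up all rings: four TX rings (one per DMA controller) and one RX
 * ring on DMA1.  Core revisions below 5 additionally get an RX ring on
 * DMA4, which carries transmit status reports (see dma_rx()).
 */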
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;

        /* setup TX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
                                     BCM43xx_TXRING_SLOTS, 1);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
                                     BCM43xx_RXRING_SLOTS, 0);
        if (!ring)
                goto err_destroy_tx3;
        dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
                                             BCM43xx_RXRING_SLOTS, 0);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring1 = ring;
        }

        dprintk(KERN_INFO PFX "DMA initialized\n");
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
        goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x0000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         */
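        /* For example, slot 18 on the BCM43xx_MMIO_DMA3_BASE controller
         * (tx_ring2) gets the cookie 0x2000 | 18 == 0x2012.
         */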
        switch (ring->mmio_base) {
        default:
                assert(0);
        case BCM43xx_MMIO_DMA1_BASE:
                break;
        case BCM43xx_MMIO_DMA2_BASE:
                cookie = 0x1000;
                break;
        case BCM43xx_MMIO_DMA3_BASE:
                cookie = 0x2000;
                break;
        case BCM43xx_MMIO_DMA4_BASE:
                cookie = 0x3000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0x0000:
                ring = dma->tx_ring0;
                break;
        case 0x1000:
                ring = dma->tx_ring1;
                break;
        case 0x2000:
                ring = dma->tx_ring2;
                break;
        case 0x3000:
                ring = dma->tx_ring3;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Fasten your seat belts now, please.
         */
        wmb();
        slot = next_slot(ring, slot);
        bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX,
                          (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
}

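/* Map one 802.11 fragment, prepend the device TX header and hand the
 * frame to the hardware.  The cookie stored in the TX header is what
 * later lets bcm43xx_dma_handle_xmitstatus() find this ring and slot
 * again.
 */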
static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
                           struct sk_buff *skb,
                           u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        u32 desc_ctl;
        u32 desc_addr;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = ring->vbase + slot;
        meta = ring->meta + slot;

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header.
         */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));

        meta->skb = skb;
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
                return_slot(ring, slot);
                printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA TX SKB >1G "
                                    "(0x%08x, len: %u)\n",
                       meta->dmaaddr, skb->len);
                return -ENOMEM;
        }

        desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
        desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
        desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
        desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
                     (u32)(meta->skb->len - ring->frameoffset));
        if (slot == ring->nr_slots - 1)
                desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;

        set_desc_ctl(desc, desc_ctl);
        set_desc_addr(desc, desc_addr);
        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);

        return 0;
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory, then poke
         * the device to send the stuff.
         * Note that this is called from atomic context.
         */
        struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
        u8 i;
        struct sk_buff *skb;

        assert(ring->tx);
        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark.
                 */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* Detach the skb, so that ieee80211_txb_free()
                 * below does not free it.
                 */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
                //TODO: handle failure of dma_tx_fragment
        }
        ieee80211_txb_free(txb);

        return 0;
}

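/* Called on a transmit status notification from the hardware.  Walk
 * the descriptors of the reported frame, unmap and free each buffer
 * and give the slots back to the ring.
 */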
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = ring->vbase + slot;
                meta = ring->meta + slot;

                is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, desc, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;
}

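/* Process one filled RX slot.  On the DMA4 ring the buffer holds a
 * hardware transmit status report instead of a frame; in that case the
 * buffer is recycled in place.  Otherwise the skb is detached, a fresh
 * buffer is mapped into the slot and the frame is passed to
 * bcm43xx_rx().
 */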
static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
        struct bcm43xx_dmadesc *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = ring->vbase + *slot;
        meta = ring->meta + *slot;

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;

                stat.cookie = le16_to_cpu(hw->cookie);
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = ring->vbase + *slot;
                        meta = ring->meta + *slot;
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
                                     "(len: %u, buffer: %u, nr-dropped: %d)\n",
                        len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err) {
                dev_kfree_skb_irq(skb);
                goto drop;
        }

drop:
        return;
}

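/* Drain the RX ring: the hardware descriptor pointer tells us up to
 * which slot the device has written, so process every slot from our
 * current position up to (but not including) that one and write the
 * new index back to the device.
 */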
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        assert(!ring->tx);
        status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS);
        descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
        current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX,
                          (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
        ring->current_slot = slot;
}

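/* Suspend/resume the TX DMA engine.  The SUSPEND bit in the TX control
 * register is set (and, presumably, the device is kept awake via
 * bcm43xx_power_saving_ctl_bits()) while suspended; both are reverted
 * on resume.
 */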
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
        bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
                          bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
                          | BCM43xx_DMA_TXCTRL_SUSPEND);
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
                          bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
                          & ~BCM43xx_DMA_TXCTRL_SUSPEND);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
968 }