/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

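/*
 * Note: idx2desc resolves a slot index to both the hardware descriptor
 * (descbase[slot] in the DMA-coherent ring memory) and the driver-private
 * metadata (ring->meta[slot]).  The two arrays are parallel: for example,
 * slot 5 maps to the sixth b43_dmadesc32 in ring memory and to
 * ring->meta[5], which holds the skb and the mapped bus address.
 */
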
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

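/*
 * Worked example of the addrext split (illustrative values only): with
 * the usual SSB_DMA_TRANSLATION_MASK of 0xC0000000, a buffer mapped at
 * bus address 0x8F001000 yields addr == 0x0F001000 and addrext == 2.
 * ssb_dma_translation() then ORs the core-specific translation bits
 * back into addr so the SSB backplane addresses the correct
 * host-memory window.  The actual mask value depends on the SSB core.
 */
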
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

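/*
 * The 32-bit and 64-bit DMA engines are driven through this small ops
 * vtable, so the rest of the file stays descriptor-format agnostic:
 * callers always go through ring->ops (e.g.
 * ring->ops->fill_descriptor(...)) and never touch b43_dmadesc32 or
 * b43_dmadesc64 directly.
 */
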
/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

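/*
 * The ring is circular: with an nr_slots of, say, 128,
 * next_slot(ring, 127) wraps to 0 and prev_slot(ring, 0) wraps to 127.
 * next_slot() also accepts -1, so the first request_slot() on a fresh
 * TX ring (current_slot == -1) hands out slot 0.
 */
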
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43-ring mapping */
static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
					      int queue_priority)
{
	struct b43_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	}

	return ring;
}

/* b43-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43_dmaring *ring)
{
	static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
	unsigned int index;

/*FIXME: have only one queue, for now */
	return 0;

	index = ring->index;
	if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
		index = 0;
	return idx_to_prio[index];
}

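/*
 * mac80211 queue 0 is the highest priority, while on the hardware side
 * TX ring 3 is the highest-priority ring, hence the inverse mappings in
 * the two functions above.  Both are currently short-circuited to a
 * single queue/ring (see the FIXMEs); the code below the early returns
 * documents the intended 4-queue mapping.
 */
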
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

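/*
 * Sketch of the sizing argument above, assuming B43_DMA_RINGMEMSIZE is
 * one 4 KiB page: dma_alloc_coherent() returns page-aligned memory, so
 * a 4K ring is naturally 4K-aligned and can never straddle an 8K
 * boundary.  That is why 4K also satisfies the 64-bit engine despite
 * the 8K wording in the specs.
 */
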
static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

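/*
 * Example of the range check (hypothetical numbers): on a 30-bit
 * engine, a 2048-byte buffer mapped at bus address 0x3FFFFC00 fails,
 * because 0x3FFFFC00 + 2048 extends past the 1 GiB (1 << 30) window
 * the engine can address.  The buffer is unmapped again and callers
 * typically retry the allocation with GFP_DMA to get reachable memory.
 */
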
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

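/*
 * DMA width is probed in two steps: the SSB core flags advertise
 * 64-bit capability directly, while 32- vs. 30-bit engines are told
 * apart by writing the ADDREXT mask into a TX control register and
 * reading it back.  Only a 32-bit engine implements those bits, so a
 * 30-bit engine reads back zero.
 */
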
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring)
{
	if (!ring)
		return;

	b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
	       (unsigned int)(ring->type),
	       ring->mmio_base,
	       (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to take care of concurrency with the
	 * rx handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;

	b43_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	switch (dmamask) {
	default:
		B43_WARN_ON(1);
		/* fallthrough */
	case DMA_30BIT_MASK:
		type = B43_DMA_30BIT;
		break;
	case DMA_32BIT_MASK:
		type = B43_DMA_32BIT;
		break;
	case DMA_64BIT_MASK:
		type = B43_DMA_64BIT;
		break;
	}
	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required DMA mask (0x%08X%08X)\n",
		       (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
		       (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
		return -EOPNOTSUPP;
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0x1000;
		break;
	case 1:
		cookie = 0x2000;
		break;
	case 2:
		cookie = 0x3000;
		break;
	case 3:
		cookie = 0x4000;
		break;
	case 4:
		cookie = 0x5000;
		break;
	case 5:
		cookie = 0x6000;
		break;
	default:
		B43_WARN_ON(1);
	}
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16) slot;

	return cookie;
}

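/*
 * Cookie layout, e.g. for slot 18 (0x012) on TX ring 2:
 *
 *   0x3012 = 0x3000 (controller ID for ring 2) | 0x012 (slot)
 *
 * parse_cookie() below reverses this: cookie & 0xF000 selects
 * tx_ring2 and cookie & 0x0FFF recovers slot 18.
 */
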
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring0;
		break;
	case 0x2000:
		ring = dma->tx_ring1;
		break;
	case 0x3000:
		ring = dma->tx_ring2;
		break;
	case 0x4000:
		ring = dma->tx_ring3;
		break;
	case 0x5000:
		ring = dma->tx_ring4;
		break;
	case 0x6000:
		ring = dma->tx_ring5;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2
	B43_WARN_ON(skb_shinfo(skb)->nr_frags);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

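/*
 * Each frame occupies SLOTS_PER_PACKET (2) ring slots: the first holds
 * the device TX header from txhdr_cache, the second the frame payload
 * itself.  Only the payload slot carries meta->skb and is marked
 * is_last_fragment, which is what b43_dma_handle_txstatus() keys on
 * when it walks and reclaims the slots of a completed frame.
 */
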
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;

	if (unlikely(skb->len < 2 + 2 + 6)) {
		/* Too short, this can't be a valid frame. */
		return -EINVAL;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring4;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = priority_to_txring(dev, ctl->queue);
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock);	/* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked) {
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			} else {
				if (!(meta->txstat.control.flags
				      & IEEE80211_TXCTL_NO_ACK))
					meta->txstat.excessive_retries = 1;
			}
			if (status->frame_count == 0) {
				/* The frame was not transmitted at all. */
				meta->txstat.retry_count = 0;
			} else
				meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}

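/*
 * Queue flow control: b43_dma_tx() stops the mac80211 queue once fewer
 * than SLOTS_PER_PACKET free slots remain, and the completion path
 * above wakes it again as soon as a frame's slots have been reclaimed.
 * The B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET) documents the
 * invariant that a stopped ring always regains room here first.
 */
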
void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

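/*
 * Buffer-recycling convention in dma_rx(): any path that keeps the old
 * skb in place (hw-txstatus rings, zero-length reads, oversized frames,
 * failed reallocation) must sync the buffer back to the device so the
 * descriptor can be reused.  Only the success path unmaps the buffer,
 * hands the skb to b43_rx() and leaves a freshly allocated buffer in
 * the slot via setup_rx_descbuffer().
 */
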
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43_power_saving_ctl_bits(dev, 0);
}