/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
			     const u8 request, const u8 requesttype,
			     const u16 offset, const u16 value,
			     void *buffer, const u16 buffer_length,
			     const int timeout)
{
	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
	int status;
	unsigned int i;
	unsigned int pipe =
	    (requesttype == USB_VENDOR_REQUEST_IN) ?
	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout);
		if (status >= 0)
			return 0;

		/*
		 * Check for errors:
		 * -ENODEV: Device has disappeared, no point in continuing.
		 * All other errors: Try again.
		 */
		if (status == -ENODEV)
			break;
	}

	ERROR(rt2x00dev,
	      "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n",
	      request, offset, status);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);

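/*
 * Note that the buffer handed to usb_control_msg() above must be suitable
 * for DMA, which is why register accesses normally go through the
 * kzalloc'ed CSR cache used by the helpers below rather than through
 * stack buffers.
 */
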
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
				   const u8 request, const u8 requesttype,
				   const u16 offset, void *buffer,
				   const u16 buffer_length, const int timeout)
{
	int status;

	BUG_ON(!mutex_is_locked(&rt2x00dev->usb_cache_mutex));

	/*
	 * Check for cache availability.
	 */
	if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
		ERROR(rt2x00dev, "CSR cache not available.\n");
		return -ENOMEM;
	}

	if (requesttype == USB_VENDOR_REQUEST_OUT)
		memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

	status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
					  offset, 0, rt2x00dev->csr.cache,
					  buffer_length, timeout);

	if (!status && requesttype == USB_VENDOR_REQUEST_IN)
		memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

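/*
 * rt2x00usb_vendor_req_buff_lock() requires the caller to hold
 * usb_cache_mutex (hence the BUG_ON above); rt2x00usb_vendor_request_buff()
 * below is the convenience wrapper that takes and releases that mutex
 * around a single buffered request.
 */
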
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
				  const u8 request, const u8 requesttype,
				  const u16 offset, void *buffer,
				  const u16 buffer_length, const int timeout)
{
	int status;

	mutex_lock(&rt2x00dev->usb_cache_mutex);

	status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
						requesttype, offset, buffer,
						buffer_length, timeout);

	mutex_unlock(&rt2x00dev->usb_cache_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

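/*
 * For example, reading a block of registers through the cached path
 * would look roughly like this (the request constants come from
 * rt2x00usb.h):
 *
 *	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
 *				      USB_VENDOR_REQUEST_IN, offset,
 *				      buffer, length, REGISTER_TIMEOUT);
 */
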
/*
 * TX data handlers.
 */
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct txdone_entry_desc txdesc;
	__le32 *txd = (__le32 *)entry->skb->data;
	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
	u32 word;

	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
	    !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	rt2x00_desc_read(txd, 0, &word);

	/*
	 * Remove the descriptor data from the buffer.
	 */
	skb_pull(entry->skb, entry->queue->desc_size);

	/*
	 * Obtain the status of this packet.
	 * Note that a status of 0 does not mean the frame was sent
	 * out correctly; it only means the frame was successfully
	 * pushed to the hardware. We have no way to determine the
	 * transmission status right now. (Only indirectly, by
	 * looking at the failed TX counters in the registers.)
	 */
	txdesc.flags = 0;
	if (!urb->status)
		__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
	else
		__set_bit(TXDONE_FAILURE, &txdesc.flags);
	txdesc.retry = 0;

	rt2x00lib_txdone(entry, &txdesc);

	/*
	 * Make this entry available for reuse.
	 */
	entry->flags = 0;
	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was full before the txdone handler
	 * we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished.
	 */
	if (!rt2x00queue_full(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, qid);
}

int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
			    struct data_queue *queue, struct sk_buff *skb)
{
	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	u32 length;

	if (rt2x00queue_full(queue))
		return -EINVAL;

	if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * Add the descriptor in front of the skb.
	 */
	skb_push(skb, queue->desc_size);
	memset(skb->data, 0, queue->desc_size);

	/*
	 * Fill in skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->data = skb->data + queue->desc_size;
	skbdesc->data_len = skb->len - queue->desc_size;
	skbdesc->desc = skb->data;
	skbdesc->desc_len = queue->desc_size;
	skbdesc->entry = entry;

	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	/*
	 * USB devices cannot blindly pass the skb->len as the
	 * length of the data to usb_fill_bulk_urb. Pass the skb
	 * to the driver to determine what the length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, skb);

	/*
	 * Initialize the URB and send the frame to the device.
	 */
	__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
			  skb->data, length, rt2x00usb_interrupt_txdone, entry);
	usb_submit_urb(entry_priv->urb, GFP_ATOMIC);

	rt2x00queue_index_inc(queue, Q_INDEX);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);

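/*
 * Note on the length passed to usb_fill_bulk_urb() above: the driver's
 * get_tx_data_len() callback returns the number of bytes the device
 * should actually receive, which may include padding beyond skb->len,
 * for example to satisfy the chipset's alignment rules or to keep the
 * transfer from ending exactly on a usb_maxpacket boundary.
 */
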
/*
 * RX data handlers.
 */
static struct sk_buff *rt2x00usb_alloc_rxskb(struct data_queue *queue)
{
	struct sk_buff *skb;
	unsigned int frame_size;
	unsigned int reserved_size;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = queue->data_size + queue->desc_size;

	/*
	 * For the allocation we should keep a few things in mind:
	 * 1) 4byte alignment of 802.11 payload
	 *
	 * For (1) we need at most 4 bytes to guarantee the correct
	 * alignment. We are going to optimize for the fact that the
	 * chance that 802.11 header_size % 4 == 2 is much bigger than
	 * anything else. However, since we may need to move the frame
	 * up to 3 bytes towards the front, we need to preallocate
	 * 6 bytes.
	 */
	reserved_size = 6;

	skb = dev_alloc_skb(frame_size + reserved_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserved_size);
	skb_put(skb, frame_size);

	return skb;
}

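/*
 * Worked example of the alignment scheme used here and in the rxdone
 * handler below: skb->data starts 2 bytes past a 4 byte boundary. A
 * QoS data header of 26 bytes (26 % 4 == 2) then ends exactly on a
 * 4 byte boundary, so align = (26 + 2) % 4 == 0 and no memmove is
 * needed. A 24 byte header gives align = (24 + 2) % 4 == 2, so the
 * frame is shifted 2 bytes towards the head, which the reserved
 * headroom can absorb.
 */
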
static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	struct rxdone_entry_desc rxdesc;
	unsigned int header_size;
	unsigned int align;

	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
	    !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Check if the received data is simply too small
	 * to be actually valid, or if the urb is signaling
	 * a problem.
	 */
	if (urb->actual_length < entry->queue->desc_size || urb->status)
		goto skip_entry;

	/*
	 * Fill in skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(entry->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	memset(&rxdesc, 0, sizeof(rxdesc));
	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

	header_size = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * The data behind the ieee80211 header must be
	 * aligned on a 4 byte boundary. We already reserved
	 * 2 bytes for the header_size % 4 == 2 optimization.
	 * To determine the number of bytes by which the data
	 * should be moved to the left, we must add these
	 * 2 bytes to the header_size.
	 */
	align = (header_size + 2) % 4;

	if (align) {
		skb_push(entry->skb, align);
		/* Move entire frame in 1 command */
		memmove(entry->skb->data, entry->skb->data + align,
			rxdesc.size);
	}

	/* Update data pointers, trim buffer to correct size */
	skbdesc->data = entry->skb->data;
	skb_trim(entry->skb, rxdesc.size);

	/*
	 * Allocate a new sk buffer to replace the current one.
	 * If allocation fails, we should drop the current frame
	 * so we can recycle the existing sk buffer for the new frame.
	 */
	skb = rt2x00usb_alloc_rxskb(entry->queue);
	if (!skb)
		goto skip_entry;

	/*
	 * Send the frame to rt2x00lib for further processing.
	 */
	rt2x00lib_rxdone(entry, &rxdesc);

	/*
	 * Replace current entry's skb with the newly allocated one,
	 * and reinitialize the urb.
	 */
	entry->skb = skb;
	urb->transfer_buffer = entry->skb->data;
	urb->transfer_buffer_length = entry->skb->len;

skip_entry:
	if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
		__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		usb_submit_urb(urb, GFP_ATOMIC);
	}

	rt2x00queue_index_inc(entry->queue, Q_INDEX);
}

void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
				    REGISTER_TIMEOUT);

	for (i = 0; i < rt2x00dev->rx->limit; i++) {
		entry_priv = rt2x00dev->rx->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
	}

	for (i = 0; i < rt2x00dev->bcn->limit; i++) {
		bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
		if (bcn_priv->guardian_urb)
			usb_kill_urb(bcn_priv->guardian_urb);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
			    struct queue_entry *entry)
{
	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, 1),
			  entry->skb->data, entry->skb->len,
			  rt2x00usb_interrupt_rxdone, entry);

	__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);

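/*
 * Each RX entry owns exactly one urb and one preallocated skb:
 * rt2x00usb_init_rxentry() only primes the first submission, after
 * which rt2x00usb_interrupt_rxdone() resubmits the urb itself for as
 * long as the radio stays enabled.
 */
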
void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
			    struct queue_entry *entry)
{
	entry->flags = 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);

static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
			       struct data_queue *queue)
{
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry_priv->urb)
			return -ENOMEM;
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (rt2x00dev->bcn != queue ||
	    !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
		return 0;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!bcn_priv->guardian_urb)
			return -ENOMEM;
	}

	return 0;
}

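/*
 * The guardian urb allocated above is only used by chipsets that require
 * a single guardian byte to be sent ahead of every beacon frame
 * (DRIVER_REQUIRE_BEACON_GUARD); for every other queue it is simply
 * left unallocated.
 */
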
static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
			       struct data_queue *queue)
{
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
		usb_free_urb(entry_priv->urb);
		if (queue->entries[i].skb)
			kfree_skb(queue->entries[i].skb);
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (rt2x00dev->bcn != queue ||
	    !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
		return;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		usb_kill_urb(bcn_priv->guardian_urb);
		usb_free_urb(bcn_priv->guardian_urb);
	}
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	struct sk_buff *skb;
	unsigned int entry_size;
	unsigned int i;
	int uninitialized_var(status);

	queue_for_each(rt2x00dev, queue) {
		status = rt2x00usb_alloc_urb(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * For the RX queue, skb's should be allocated.
	 */
	entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
	for (i = 0; i < rt2x00dev->rx->limit; i++) {
		skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
		if (!skb) {
			status = -ENOMEM;
			goto exit;
		}

		rt2x00dev->rx->entries[i].skb = skb;
	}

	return 0;

exit:
	rt2x00usb_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	queue_for_each(rt2x00dev, queue)
		rt2x00usb_free_urb(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	kfree(rt2x00dev->csr.cache);
	rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
	if (!rt2x00dev->csr.cache)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00usb_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00usb_probe(struct usb_interface *usb_intf,
		    const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	usb_dev = usb_get_dev(usb_dev);

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_put_device;
	}

	usb_set_intfdata(usb_intf, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = usb_intf;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	mutex_init(&rt2x00dev->usb_cache_mutex);

	rt2x00dev->usb_maxpacket =
	    usb_maxpacket(usb_dev, usb_sndbulkpipe(usb_dev, 1), 1);
	if (!rt2x00dev->usb_maxpacket)
		rt2x00dev->usb_maxpacket = 1;
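	/*
	 * The fallback to 1 above matters because usb_maxpacket() may
	 * return 0; drivers typically use usb_maxpacket as a divisor
	 * when sizing bulk transfers, so it must never be zero.
	 */
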
	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_put_device:
	usb_put_dev(usb_dev);

	usb_set_intfdata(usb_intf, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00usb_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the USB device data.
	 */
	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	rt2x00usb_free_reg(rt2x00dev);

	/*
	 * Decrease the usbdev refcount.
	 */
	usb_put_dev(interface_to_usbdev(usb_intf));

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	usb_get_dev(interface_to_usbdev(usb_intf));

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");