/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00usb
	Abstract: rt2x00 generic usb device routines.
 */
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/usb.h>
29 #include <linux/bug.h>
32 #include "rt2x00usb.h"
/*
 * Interfacing with the HW.
 */
37 int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
38 const u8 request, const u8 requesttype,
39 const u16 offset, const u16 value,
40 void *buffer, const u16 buffer_length,
43 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
47 (requesttype == USB_VENDOR_REQUEST_IN) ?
48 usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
51 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
52 status = usb_control_msg(usb_dev, pipe, request, requesttype,
53 value, offset, buffer, buffer_length,
60 * -ENODEV: Device has disappeared, no point continuing.
61 * All other errors: Try again.
63 else if (status == -ENODEV)
68 "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n",
69 request, offset, status);
73 EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);
75 int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
76 const u8 request, const u8 requesttype,
77 const u16 offset, void *buffer,
78 const u16 buffer_length, const int timeout)
82 BUG_ON(!mutex_is_locked(&rt2x00dev->usb_cache_mutex));
85 * Check for Cache availability.
87 if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
88 ERROR(rt2x00dev, "CSR cache not available.\n");
92 if (requesttype == USB_VENDOR_REQUEST_OUT)
93 memcpy(rt2x00dev->csr.cache, buffer, buffer_length);
95 status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
96 offset, 0, rt2x00dev->csr.cache,
97 buffer_length, timeout);
99 if (!status && requesttype == USB_VENDOR_REQUEST_IN)
100 memcpy(buffer, rt2x00dev->csr.cache, buffer_length);
104 EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);
106 int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
107 const u8 request, const u8 requesttype,
108 const u16 offset, void *buffer,
109 const u16 buffer_length, const int timeout)
113 mutex_lock(&rt2x00dev->usb_cache_mutex);
115 status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
116 requesttype, offset, buffer,
117 buffer_length, timeout);
119 mutex_unlock(&rt2x00dev->usb_cache_mutex);
123 EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
125 static void rt2x00usb_vendor_request_async_complete(struct urb *urb)
128 * We're done with it, descrease usage count and let the
129 * usb layer delete it as soon as it is done with it.
134 int rt2x00usb_vendor_request_async(struct rt2x00_dev *rt2x00dev,
135 const u8 request, const u16 offset,
138 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
139 struct usb_ctrlrequest *ctrl;
143 urb = usb_alloc_urb(0, GFP_NOIO);
147 ctrl = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
153 ctrl->bRequestType= USB_VENDOR_REQUEST_OUT;
154 ctrl->bRequest = request;
155 ctrl->wValue = cpu_to_le16p(&value);
156 ctrl->wIndex = cpu_to_le16p(&offset);
159 usb_fill_control_urb(urb, usb_dev, usb_sndctrlpipe(usb_dev, 0),
160 (unsigned char *)ctrl, NULL, 0,
161 rt2x00usb_vendor_request_async_complete, NULL);
163 status = usb_submit_urb(urb, GFP_ATOMIC);
175 EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_async);
180 static void rt2x00usb_interrupt_txdone(struct urb *urb)
182 struct queue_entry *entry = (struct queue_entry *)urb->context;
183 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
184 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
185 struct txdone_entry_desc txdesc;
186 __le32 *txd = (__le32 *)entry->skb->data;
189 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
190 !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
193 rt2x00_desc_read(txd, 0, &word);
196 * Remove the descriptor data from the buffer.
198 skb_pull(entry->skb, entry->queue->desc_size);
201 * Obtain the status about this packet.
203 txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
205 txdesc.control = &priv_tx->control;
207 rt2x00lib_txdone(entry, &txdesc);
210 * Make this entry available for reuse.
213 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
216 * If the data queue was full before the txdone handler
217 * we must make sure the packet queue in the mac80211 stack
218 * is reenabled when the txdone handler has finished.
220 if (!rt2x00queue_full(entry->queue))
221 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
224 int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
225 struct data_queue *queue, struct sk_buff *skb,
226 struct ieee80211_tx_control *control)
228 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
229 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
230 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
231 struct skb_frame_desc *skbdesc;
234 if (rt2x00queue_full(queue))
237 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
239 "Arrived at non-free entry in the non-full queue %d.\n"
240 "Please file bug report to %s.\n",
241 control->queue, DRV_PROJECT);
246 * Add the descriptor in front of the skb.
248 skb_push(skb, queue->desc_size);
249 memset(skb->data, 0, queue->desc_size);
252 * Fill in skb descriptor
254 skbdesc = get_skb_frame_desc(skb);
255 memset(skbdesc, 0, sizeof(*skbdesc));
256 skbdesc->data = skb->data + queue->desc_size;
257 skbdesc->data_len = skb->len - queue->desc_size;
258 skbdesc->desc = skb->data;
259 skbdesc->desc_len = queue->desc_size;
260 skbdesc->entry = entry;
262 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
265 * USB devices cannot blindly pass the skb->len as the
266 * length of the data to usb_fill_bulk_urb. Pass the skb
267 * to the driver to determine what the length should be.
269 length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, skb);
272 * Initialize URB and send the frame to the device.
274 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
275 usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
276 skb->data, length, rt2x00usb_interrupt_txdone, entry);
277 usb_submit_urb(priv_tx->urb, GFP_ATOMIC);
279 rt2x00queue_index_inc(queue, Q_INDEX);
283 EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
288 static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
291 unsigned int frame_size;
294 * As alignment we use 2 and not NET_IP_ALIGN because we need
295 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
296 * can be 0 on some hardware). We use these 2 bytes for frame
297 * alignment later, we assume that the chance that
298 * header_size % 4 == 2 is bigger then header_size % 2 == 0
299 * and thus optimize alignment by reserving the 2 bytes in
302 frame_size = queue->data_size + queue->desc_size;
303 skb = dev_alloc_skb(frame_size + 2);
308 skb_put(skb, frame_size);
313 static void rt2x00usb_interrupt_rxdone(struct urb *urb)
315 struct queue_entry *entry = (struct queue_entry *)urb->context;
316 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
318 struct skb_frame_desc *skbdesc;
319 struct rxdone_entry_desc rxdesc;
321 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
322 !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
326 * Check if the received data is simply too small
327 * to be actually valid, or if the urb is signaling
330 if (urb->actual_length < entry->queue->desc_size || urb->status)
334 * Fill in skb descriptor
336 skbdesc = get_skb_frame_desc(entry->skb);
337 memset(skbdesc, 0, sizeof(*skbdesc));
338 skbdesc->entry = entry;
340 memset(&rxdesc, 0, sizeof(rxdesc));
341 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
344 * Allocate a new sk buffer to replace the current one.
345 * If allocation fails, we should drop the current frame
346 * so we can recycle the existing sk buffer for the new frame.
348 skb = rt2x00usb_alloc_rxskb(entry->queue);
353 * Send the frame to rt2x00lib for further processing.
355 rt2x00lib_rxdone(entry, &rxdesc);
358 * Replace current entry's skb with the newly allocated one,
359 * and reinitialize the urb.
362 urb->transfer_buffer = entry->skb->data;
363 urb->transfer_buffer_length = entry->skb->len;
366 if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
367 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
368 usb_submit_urb(urb, GFP_ATOMIC);
371 rt2x00queue_index_inc(entry->queue, Q_INDEX);
377 void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
379 struct queue_entry_priv_usb_rx *priv_rx;
380 struct queue_entry_priv_usb_tx *priv_tx;
381 struct queue_entry_priv_usb_bcn *priv_bcn;
382 struct data_queue *queue;
385 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000,
391 for (i = 0; i < rt2x00dev->rx->limit; i++) {
392 priv_rx = rt2x00dev->rx->entries[i].priv_data;
393 usb_kill_urb(priv_rx->urb);
396 tx_queue_for_each(rt2x00dev, queue) {
397 for (i = 0; i < queue->limit; i++) {
398 priv_tx = queue->entries[i].priv_data;
399 usb_kill_urb(priv_tx->urb);
403 for (i = 0; i < rt2x00dev->bcn->limit; i++) {
404 priv_bcn = rt2x00dev->bcn->entries[i].priv_data;
405 usb_kill_urb(priv_bcn->urb);
407 if (priv_bcn->guardian_urb)
408 usb_kill_urb(priv_bcn->guardian_urb);
411 if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
414 for (i = 0; i < rt2x00dev->bcn[1].limit; i++) {
415 priv_tx = rt2x00dev->bcn[1].entries[i].priv_data;
416 usb_kill_urb(priv_tx->urb);
419 EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
/*
 * Device initialization handlers.
 */
424 void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
425 struct queue_entry *entry)
427 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
428 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data;
430 usb_fill_bulk_urb(priv_rx->urb, usb_dev,
431 usb_rcvbulkpipe(usb_dev, 1),
432 entry->skb->data, entry->skb->len,
433 rt2x00usb_interrupt_rxdone, entry);
435 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
436 usb_submit_urb(priv_rx->urb, GFP_ATOMIC);
438 EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
/*
 * TX entries need no per-entry initialization on USB devices; this
 * empty hook exists to satisfy the shared rt2x00lib callback interface.
 */
void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
			    struct queue_entry *entry)
{
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
447 static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
448 struct data_queue *queue)
450 struct queue_entry_priv_usb_rx *priv_rx;
451 struct queue_entry_priv_usb_tx *priv_tx;
452 struct queue_entry_priv_usb_bcn *priv_bcn;
454 unsigned int guardian =
455 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
461 for (i = 0; i < queue->limit; i++) {
462 urb = usb_alloc_urb(0, GFP_KERNEL);
466 if (queue->qid == QID_RX) {
467 priv_rx = queue->entries[i].priv_data;
469 } else if (queue->qid == QID_MGMT && guardian) {
470 priv_bcn = queue->entries[i].priv_data;
473 urb = usb_alloc_urb(0, GFP_KERNEL);
477 priv_bcn->guardian_urb = urb;
479 priv_tx = queue->entries[i].priv_data;
487 static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
488 struct data_queue *queue)
490 struct queue_entry_priv_usb_rx *priv_rx;
491 struct queue_entry_priv_usb_tx *priv_tx;
492 struct queue_entry_priv_usb_bcn *priv_bcn;
494 unsigned int guardian =
495 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
501 for (i = 0; i < queue->limit; i++) {
502 if (queue->qid == QID_RX) {
503 priv_rx = queue->entries[i].priv_data;
505 } else if (queue->qid == QID_MGMT && guardian) {
506 priv_bcn = queue->entries[i].priv_data;
508 usb_kill_urb(priv_bcn->guardian_urb);
509 usb_free_urb(priv_bcn->guardian_urb);
513 priv_tx = queue->entries[i].priv_data;
519 if (queue->entries[i].skb)
520 kfree_skb(queue->entries[i].skb);
524 int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
526 struct data_queue *queue;
528 unsigned int entry_size;
530 int uninitialized_var(status);
535 queue_for_each(rt2x00dev, queue) {
536 status = rt2x00usb_alloc_urb(rt2x00dev, queue);
542 * For the RX queue, skb's should be allocated.
544 entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
545 for (i = 0; i < rt2x00dev->rx->limit; i++) {
546 skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
550 rt2x00dev->rx->entries[i].skb = skb;
556 rt2x00usb_uninitialize(rt2x00dev);
560 EXPORT_SYMBOL_GPL(rt2x00usb_initialize);
562 void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
564 struct data_queue *queue;
566 queue_for_each(rt2x00dev, queue)
567 rt2x00usb_free_urb(rt2x00dev, queue);
569 EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
/*
 * USB driver handlers.
 */
574 static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
576 kfree(rt2x00dev->rf);
577 rt2x00dev->rf = NULL;
579 kfree(rt2x00dev->eeprom);
580 rt2x00dev->eeprom = NULL;
582 kfree(rt2x00dev->csr.cache);
583 rt2x00dev->csr.cache = NULL;
586 static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
588 rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
589 if (!rt2x00dev->csr.cache)
592 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
593 if (!rt2x00dev->eeprom)
596 rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
603 ERROR_PROBE("Failed to allocate registers.\n");
605 rt2x00usb_free_reg(rt2x00dev);
610 int rt2x00usb_probe(struct usb_interface *usb_intf,
611 const struct usb_device_id *id)
613 struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
614 struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
615 struct ieee80211_hw *hw;
616 struct rt2x00_dev *rt2x00dev;
619 usb_dev = usb_get_dev(usb_dev);
621 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
623 ERROR_PROBE("Failed to allocate hardware.\n");
625 goto exit_put_device;
628 usb_set_intfdata(usb_intf, hw);
630 rt2x00dev = hw->priv;
631 rt2x00dev->dev = usb_intf;
632 rt2x00dev->ops = ops;
634 mutex_init(&rt2x00dev->usb_cache_mutex);
636 rt2x00dev->usb_maxpacket =
637 usb_maxpacket(usb_dev, usb_sndbulkpipe(usb_dev, 1), 1);
638 if (!rt2x00dev->usb_maxpacket)
639 rt2x00dev->usb_maxpacket = 1;
641 retval = rt2x00usb_alloc_reg(rt2x00dev);
643 goto exit_free_device;
645 retval = rt2x00lib_probe_dev(rt2x00dev);
652 rt2x00usb_free_reg(rt2x00dev);
655 ieee80211_free_hw(hw);
658 usb_put_dev(usb_dev);
660 usb_set_intfdata(usb_intf, NULL);
664 EXPORT_SYMBOL_GPL(rt2x00usb_probe);
666 void rt2x00usb_disconnect(struct usb_interface *usb_intf)
668 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
669 struct rt2x00_dev *rt2x00dev = hw->priv;
672 * Free all allocated data.
674 rt2x00lib_remove_dev(rt2x00dev);
675 rt2x00usb_free_reg(rt2x00dev);
676 ieee80211_free_hw(hw);
679 * Free the USB device data.
681 usb_set_intfdata(usb_intf, NULL);
682 usb_put_dev(interface_to_usbdev(usb_intf));
684 EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);
687 int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
689 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
690 struct rt2x00_dev *rt2x00dev = hw->priv;
693 retval = rt2x00lib_suspend(rt2x00dev, state);
697 rt2x00usb_free_reg(rt2x00dev);
700 * Decrease usbdev refcount.
702 usb_put_dev(interface_to_usbdev(usb_intf));
706 EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
708 int rt2x00usb_resume(struct usb_interface *usb_intf)
710 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
711 struct rt2x00_dev *rt2x00dev = hw->priv;
714 usb_get_dev(interface_to_usbdev(usb_intf));
716 retval = rt2x00usb_alloc_reg(rt2x00dev);
720 retval = rt2x00lib_resume(rt2x00dev);
727 rt2x00usb_free_reg(rt2x00dev);
731 EXPORT_SYMBOL_GPL(rt2x00usb_resume);
732 #endif /* CONFIG_PM */
735 * rt2x00usb module information.
737 MODULE_AUTHOR(DRV_PROJECT);
738 MODULE_VERSION(DRV_VERSION);
739 MODULE_DESCRIPTION("rt2x00 usb library");
740 MODULE_LICENSE("GPL");