/*
        Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00usb
        Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
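/*
 * rt2x00usb_vendor_request - Send a vendor specific control request.
 *
 * The request is retried up to REGISTER_BUSY_COUNT times. A -ENODEV
 * error means the device has disappeared: the DEVICE_STATE_PRESENT flag
 * is cleared and the retry loop is aborted immediately.
 */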
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
                             const u8 request, const u8 requesttype,
                             const u16 offset, const u16 value,
                             void *buffer, const u16 buffer_length,
                             const int timeout)
{
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        int status;
        unsigned int i;
        unsigned int pipe =
            (requesttype == USB_VENDOR_REQUEST_IN) ?
            usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return -ENODEV;

        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                status = usb_control_msg(usb_dev, pipe, request, requesttype,
                                         value, offset, buffer, buffer_length,
                                         timeout);
                if (status >= 0)
                        return 0;

                /*
                 * Check for errors
                 * -ENODEV: Device has disappeared, no point continuing.
                 * All other errors: Try again.
                 */
                else if (status == -ENODEV) {
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                        break;
                }
        }

        ERROR(rt2x00dev,
              "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n",
              request, offset, status);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);

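/*
 * rt2x00usb_vendor_req_buff_lock - Vendor request through the CSR cache.
 *
 * Caller must hold csr_mutex. The transfer is bounced through the
 * pre-allocated csr.cache buffer so the data handed to the USB core is
 * always a kmalloc'ed, DMA-capable buffer; requests larger than
 * CSR_CACHE_SIZE are rejected with -ENOMEM.
 */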
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
                                   const u8 request, const u8 requesttype,
                                   const u16 offset, void *buffer,
                                   const u16 buffer_length, const int timeout)
{
        int status;

        BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

        /*
         * Check for Cache availability.
         */
        if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
                ERROR(rt2x00dev, "CSR cache not available.\n");
                return -ENOMEM;
        }

        if (requesttype == USB_VENDOR_REQUEST_OUT)
                memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

        status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
                                          offset, 0, rt2x00dev->csr.cache,
                                          buffer_length, timeout);

        if (!status && requesttype == USB_VENDOR_REQUEST_IN)
                memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

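/*
 * rt2x00usb_vendor_request_buff - Vendor request for arbitrarily sized buffers.
 *
 * Takes csr_mutex and splits the transfer into chunks of at most
 * CSR_CACHE_SIZE bytes, each handled by rt2x00usb_vendor_req_buff_lock().
 * Illustrative use only (constants as declared in rt2x00usb.h):
 *
 *      rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
 *                                    USB_VENDOR_REQUEST_IN, offset,
 *                                    buffer, length, REGISTER_TIMEOUT);
 */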
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
                                  const u8 request, const u8 requesttype,
                                  const u16 offset, void *buffer,
                                  const u16 buffer_length, const int timeout)
{
        int status = 0;
        unsigned char *tb;
        u16 off, len, bsize;

        mutex_lock(&rt2x00dev->csr_mutex);

        tb  = (unsigned char *)buffer;
        off = offset;
        len = buffer_length;
        while (len && !status) {
                bsize = min_t(u16, CSR_CACHE_SIZE, len);
                status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
                                                        requesttype, off, tb,
                                                        bsize, timeout);

                tb  += bsize;
                len -= bsize;
                off += bsize;
        }

        mutex_unlock(&rt2x00dev->csr_mutex);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

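/*
 * rt2x00usb_regbusy_read - Poll an indirect register until its busy bit clears.
 *
 * Reads the register up to REGISTER_BUSY_COUNT times, waiting
 * REGISTER_BUSY_DELAY microseconds between attempts. Returns 1 as soon
 * as the busy field is cleared, or 0 (with *reg set to ~0) when the
 * hardware never becomes ready.
 */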
int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
                           const unsigned int offset,
                           const struct rt2x00_field32 field,
                           u32 *reg)
{
        unsigned int i;

        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return -ENODEV;

        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
                if (!rt2x00_get_field32(*reg, field))
                        return 1;
                udelay(REGISTER_BUSY_DELAY);
        }

        ERROR(rt2x00dev, "Indirect register access failed: "
              "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
        *reg = ~0;

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);

/*
 * TX data handlers.
 */
static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
{
        /*
         * If the transfer to hardware succeeded, it does not mean the
         * frame was sent out correctly. It only means the frame
         * was successfully pushed to the hardware; we have no
         * way to determine the transmission status right now.
         * (Only indirectly by looking at the failed TX counters
         * in the register).
         */
        if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
                rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
        else
                rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
}

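/*
 * rt2x00usb_work_txdone - Deferred TX completion handling.
 *
 * Runs in process context (scheduled from the URB completion handler)
 * and walks every TX queue from the Q_INDEX_DONE side, completing
 * entries until it hits one that is still owned by the device.
 */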
static void rt2x00usb_work_txdone(struct work_struct *work)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, txdone_work);
        struct data_queue *queue;
        struct queue_entry *entry;

        tx_queue_for_each(rt2x00dev, queue) {
                while (!rt2x00queue_empty(queue)) {
                        entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);

                        if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                                break;

                        rt2x00usb_work_txdone_entry(entry);
                }
        }
}

static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        /*
         * Report the frame as DMA done
         */
        rt2x00lib_dmadone(entry);

        /*
         * Check if the frame was correctly uploaded
         */
        if (urb->status)
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

        /*
         * Schedule the delayed work for reading the TX status
         * from the device.
         */
        ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
}

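/*
 * rt2x00usb_kick_tx_entry - Hand a pending TX entry to the USB core.
 *
 * Clears ENTRY_DATA_PENDING, asks the driver for the real transfer
 * length and submits the bulk-out URB. On submission failure the entry
 * is marked ENTRY_DATA_IO_FAILED and reported as DMA done so that the
 * txdone path can clean it up.
 */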
static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        u32 length;
        int status;

        if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
                return;

        /*
         * USB devices cannot blindly pass the skb->len as the
         * length of the data to usb_fill_bulk_urb. Pass the skb
         * to the driver to determine what the length should be.
         */
        length = rt2x00dev->ops->lib->get_tx_data_len(entry);

        usb_fill_bulk_urb(entry_priv->urb, usb_dev,
                          usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
                          entry->skb->data, length,
                          rt2x00usb_interrupt_txdone, entry);

        status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        if (status) {
                if (status == -ENODEV)
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
                rt2x00lib_dmadone(entry);
        }
}

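/*
 * rt2x00usb_kick_queue - Submit all pending entries of a data queue.
 *
 * Only the four access category (data) queues are kicked here; all
 * other queue types are ignored by this helper.
 */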
void rt2x00usb_kick_queue(struct data_queue *queue)
{
        switch (queue->qid) {
        case QID_AC_BE:
        case QID_AC_BK:
        case QID_AC_VI:
        case QID_AC_VO:
                if (!rt2x00queue_empty(queue))
                        rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
                                                   rt2x00usb_kick_tx_entry);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);

static void rt2x00usb_kill_entry(struct queue_entry *entry)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        usb_kill_urb(entry_priv->urb);

        /*
         * Kill guardian urb (if required by driver).
         */
        if ((entry->queue->qid == QID_BEACON) &&
            (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
                usb_kill_urb(bcn_priv->guardian_urb);
}

void rt2x00usb_stop_queue(struct data_queue *queue)
{
        rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
                                   rt2x00usb_kill_entry);
}
EXPORT_SYMBOL_GPL(rt2x00usb_stop_queue);

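/*
 * rt2x00usb_watchdog_tx_dma - Recover a TX queue whose DMA timed out.
 *
 * The queue is stopped in mac80211, all outstanding URBs are killed,
 * the txdone work is run synchronously to flush completed entries, and
 * the queue is woken again afterwards.
 */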
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        unsigned short threshold = queue->threshold;

        WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
                " invoke forced reset\n", queue->qid);

        /*
         * Temporarily disable the TX queue; this will force mac80211
         * to use the other queues until this queue has been restored.
         *
         * Set the queue threshold to the queue limit. This prevents the
         * queue from being enabled during the txdone handler.
         */
        queue->threshold = queue->limit;
        ieee80211_stop_queue(rt2x00dev->hw, queue->qid);

        /*
         * Kill all entries in the queue, afterwards we need to
         * wait a bit for all URBs to be cancelled.
         */
        rt2x00usb_stop_queue(queue);

        /*
         * In case a driver has overridden the txdone_work
         * function, we invoke the TX done through there.
         */
        rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);

        /*
         * The queue has been reset, and mac80211 is allowed to use the
         * queue again.
         */
        queue->threshold = threshold;
        ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
}

static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
{
        WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
                " invoke forced tx handler\n", queue->qid);

        ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
}

void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        tx_queue_for_each(rt2x00dev, queue) {
                if (!rt2x00queue_empty(queue)) {
                        if (rt2x00queue_dma_timeout(queue))
                                rt2x00usb_watchdog_tx_dma(queue);
                        if (rt2x00queue_status_timeout(queue))
                                rt2x00usb_watchdog_tx_status(queue);
                }
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);

/*
 * RX data handlers.
 */
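/*
 * rt2x00usb_work_rxdone - Deferred RX completion handling.
 *
 * Runs in process context and forwards every entry that the device has
 * released (ENTRY_OWNER_DEVICE_DATA cleared) to rt2x00lib, after
 * attaching a scratch descriptor buffer to the skb.
 */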
static void rt2x00usb_work_rxdone(struct work_struct *work)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, rxdone_work);
        struct queue_entry *entry;
        struct skb_frame_desc *skbdesc;
        u8 rxd[32];

        while (!rt2x00queue_empty(rt2x00dev->rx)) {
                entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);

                if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                        break;

                /*
                 * Fill in desc fields of the skb descriptor
                 */
                skbdesc = get_skb_frame_desc(entry->skb);
                skbdesc->desc = rxd;
                skbdesc->desc_len = entry->queue->desc_size;

                /*
                 * Send the frame to rt2x00lib for further processing.
                 */
                rt2x00lib_rxdone(entry);
        }
}

static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        /*
         * Report the frame as DMA done
         */
        rt2x00lib_dmadone(entry);

        /*
         * Check if the received data is simply too small
         * to be actually valid, or if the urb is signaling
         * a problem.
         */
        if (urb->actual_length < entry->queue->desc_size || urb->status)
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

        /*
         * Schedule the delayed work for reading the RX status
         * from the device.
         */
        ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
}

/*
 * Radio handlers
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
        rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
                                    REGISTER_TIMEOUT);

        rt2x00dev->ops->lib->stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
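/*
 * rt2x00usb_clear_entry - Reset a queue entry for reuse.
 *
 * Clears the entry flags and, for RX entries, refills and resubmits the
 * bulk-in URB so the device can deliver the next frame into it.
 */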
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
        struct usb_device *usb_dev =
            to_usb_device_intf(entry->queue->rt2x00dev->dev);
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        int pipe;
        int status;

        entry->flags = 0;

        if (entry->queue->qid == QID_RX) {
                pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint);
                usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
                                entry->skb->data, entry->skb->len,
                                rt2x00usb_interrupt_rxdone, entry);

                set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);

                status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
                if (status) {
                        if (status == -ENODEV)
                                clear_bit(DEVICE_STATE_PRESENT,
                                          &entry->queue->rt2x00dev->flags);
                        set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
                        rt2x00lib_dmadone(entry);
                }
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);

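/*
 * rt2x00usb_assign_endpoint - Bind a queue to a bulk endpoint.
 *
 * Stores the endpoint number and maximum packet size in the queue;
 * a maxpacket value of 0 is replaced by 1.
 */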
static void rt2x00usb_assign_endpoint(struct data_queue *queue,
                                      struct usb_endpoint_descriptor *ep_desc)
{
        struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
        int pipe;

        queue->usb_endpoint = usb_endpoint_num(ep_desc);

        if (queue->qid == QID_RX) {
                pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
                queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
        } else {
                pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
                queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
        }

        if (!queue->usb_maxpacket)
                queue->usb_maxpacket = 1;
}

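/*
 * rt2x00usb_find_endpoints - Map the interface endpoints onto the queues.
 *
 * Bulk-in endpoints are assigned to the RX queue; bulk-out endpoints
 * are assigned to the TX queues in order. Returns -EPIPE when no usable
 * bulk-in/bulk-out pair is found.
 */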
static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
        struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        struct data_queue *queue = rt2x00dev->tx;
        struct usb_endpoint_descriptor *tx_ep_desc = NULL;
        unsigned int i;

        /*
         * Walk through all available endpoints to search for "bulk in"
         * and "bulk out" endpoints. When we find such endpoints collect
         * the information we need from the descriptor and assign it
         * to the queue.
         */
        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc)) {
                        rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           (queue != queue_end(rt2x00dev))) {
                        rt2x00usb_assign_endpoint(queue, ep_desc);
                        queue = queue_next(queue);

                        tx_ep_desc = ep_desc;
                }
        }

        /*
         * At least 1 endpoint for RX and 1 endpoint for TX must be available.
         */
        if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
                ERROR(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
                return -EPIPE;
        }

        /*
         * It might be possible that not all queues have a dedicated endpoint.
         * Loop through all TX queues and copy the endpoint information
         * which we have gathered from already assigned endpoints.
         */
        txall_queue_for_each(rt2x00dev, queue) {
                if (!queue->usb_endpoint)
                        rt2x00usb_assign_endpoint(queue, tx_ep_desc);
        }

        return 0;
}

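/*
 * rt2x00usb_alloc_entries - Allocate the URBs for a queue.
 *
 * Every entry gets its own bulk URB. Beacon queue entries additionally
 * get a guardian URB when the driver has set DRIVER_REQUIRE_BEACON_GUARD.
 */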
static int rt2x00usb_alloc_entries(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry_priv_usb *entry_priv;
        struct queue_entry_priv_usb_bcn *bcn_priv;
        unsigned int i;

        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!entry_priv->urb)
                        return -ENOMEM;
        }

        /*
         * If this is not the beacon queue or
         * no guardian byte was required for the beacon,
         * then we are done.
         */
        if (queue->qid != QID_BEACON ||
            !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
                return 0;

        for (i = 0; i < queue->limit; i++) {
                bcn_priv = queue->entries[i].priv_data;
                bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!bcn_priv->guardian_urb)
                        return -ENOMEM;
        }

        return 0;
}

static void rt2x00usb_free_entries(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry_priv_usb *entry_priv;
        struct queue_entry_priv_usb_bcn *bcn_priv;
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                usb_kill_urb(entry_priv->urb);
                usb_free_urb(entry_priv->urb);
        }

        /*
         * If this is not the beacon queue or
         * no guardian byte was required for the beacon,
         * then we are done.
         */
        if (queue->qid != QID_BEACON ||
            !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
                return;

        for (i = 0; i < queue->limit; i++) {
                bcn_priv = queue->entries[i].priv_data;
                usb_kill_urb(bcn_priv->guardian_urb);
                usb_free_urb(bcn_priv->guardian_urb);
        }
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        /*
         * Find endpoints for each queue
         */
        status = rt2x00usb_find_endpoints(rt2x00dev);
        if (status)
                goto exit;

        /*
         * Allocate DMA
         */
        queue_for_each(rt2x00dev, queue) {
                status = rt2x00usb_alloc_entries(queue);
                if (status)
                        goto exit;
        }

        return 0;

exit:
        rt2x00usb_uninitialize(rt2x00dev);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        queue_for_each(rt2x00dev, queue)
                rt2x00usb_free_entries(queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rf);
        rt2x00dev->rf = NULL;

        kfree(rt2x00dev->eeprom);
        rt2x00dev->eeprom = NULL;

        kfree(rt2x00dev->csr.cache);
        rt2x00dev->csr.cache = NULL;
}

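/*
 * rt2x00usb_alloc_reg - Allocate the register buffers.
 *
 * Allocates the CSR cache used by the vendor request helpers as well as
 * the EEPROM and RF register shadow buffers. On any failure all buffers
 * are released again and -ENOMEM is returned.
 */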
static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
        rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
        if (!rt2x00dev->csr.cache)
                goto exit;

        rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
        if (!rt2x00dev->eeprom)
                goto exit;

        rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
        if (!rt2x00dev->rf)
                goto exit;

        return 0;

exit:
        ERROR_PROBE("Failed to allocate registers.\n");

        rt2x00usb_free_reg(rt2x00dev);

        return -ENOMEM;
}

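/*
 * rt2x00usb_probe - Common probe handler for rt2x00 USB drivers.
 *
 * Allocates the ieee80211_hw structure, sets up the rt2x00 device,
 * initializes the deferred RX/TX completion work and hands control to
 * rt2x00lib_probe_dev(). On failure everything is torn down again and
 * the usb_device reference taken at the start is dropped.
 */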
int rt2x00usb_probe(struct usb_interface *usb_intf,
                    const struct usb_device_id *id)
{
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
        struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
        struct ieee80211_hw *hw;
        struct rt2x00_dev *rt2x00dev;
        int retval;

        usb_dev = usb_get_dev(usb_dev);

        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                ERROR_PROBE("Failed to allocate hardware.\n");
                retval = -ENOMEM;
                goto exit_put_device;
        }

        usb_set_intfdata(usb_intf, hw);

        rt2x00dev = hw->priv;
        rt2x00dev->dev = &usb_intf->dev;
        rt2x00dev->ops = ops;
        rt2x00dev->hw = hw;

        rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

        INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
        INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);

        retval = rt2x00usb_alloc_reg(rt2x00dev);
        if (retval)
                goto exit_free_device;

        retval = rt2x00lib_probe_dev(rt2x00dev);
        if (retval)
                goto exit_free_reg;

        return 0;

exit_free_reg:
        rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
        ieee80211_free_hw(hw);

exit_put_device:
        usb_put_dev(usb_dev);

        usb_set_intfdata(usb_intf, NULL);

        return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        /*
         * Free all allocated data.
         */
        rt2x00lib_remove_dev(rt2x00dev);
        rt2x00usb_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);

        /*
         * Free the USB device data.
         */
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;
        int retval;

        retval = rt2x00lib_suspend(rt2x00dev, state);
        if (retval)
                return retval;

        /*
         * Decrease usbdev refcount.
         */
        usb_put_dev(interface_to_usbdev(usb_intf));

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        usb_get_dev(interface_to_usbdev(usb_intf));

        return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");