rt2x00usb.c

/*
        Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00usb
        Abstract: rt2x00 generic usb device routines.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
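
/*
 * rt2x00usb_vendor_request() issues a vendor specific control request on
 * the default control pipe (endpoint 0). The direction of the pipe is
 * taken from @requesttype: USB_VENDOR_REQUEST_IN selects the receive
 * pipe, everything else the send pipe. The transfer is retried up to
 * REGISTER_BUSY_COUNT times; only -ENODEV (device unplugged) aborts the
 * retry loop early.
 *
 * Illustrative call only (request codes and offsets are defined by the
 * individual chipset drivers, not by this file): a "set RX control"
 * style request without a data stage could look like
 *
 *      rt2x00usb_vendor_request(rt2x00dev, USB_RX_CONTROL,
 *                               USB_VENDOR_REQUEST_OUT, 0x0000, 0x0000,
 *                               NULL, 0, REGISTER_TIMEOUT);
 */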
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
                             const u8 request, const u8 requesttype,
                             const u16 offset, const u16 value,
                             void *buffer, const u16 buffer_length,
                             const int timeout)
{
        struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
        int status;
        unsigned int i;
        unsigned int pipe =
            (requesttype == USB_VENDOR_REQUEST_IN) ?
            usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                status = usb_control_msg(usb_dev, pipe, request, requesttype,
                                         value, offset, buffer, buffer_length,
                                         timeout);
                if (status >= 0)
                        return 0;

                /*
                 * Check for errors
                 * -ENODEV: Device has disappeared, no point continuing.
                 * All other errors: Try again.
                 */
                else if (status == -ENODEV)
                        break;
        }

        ERROR(rt2x00dev,
              "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n",
              request, offset, status);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);
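
/*
 * rt2x00usb_vendor_req_buff_lock() performs the same control transfer but
 * bounces the data through the preallocated CSR cache (rt2x00dev->csr.cache).
 * This keeps the buffer handed to usb_control_msg() a plain kmalloc()'d
 * buffer (stack buffers must not be used for USB transfers) and avoids
 * allocating a bounce buffer on every register access. The caller must
 * already hold usb_cache_mutex, which the BUG_ON() below enforces;
 * rt2x00usb_vendor_request_buff() is the convenience wrapper that takes
 * and releases the mutex itself.
 *
 * Illustrative use only; the real register accessors are thin wrappers in
 * rt2x00usb.h, and the request code and offset below merely stand in for
 * whatever the chipset driver defines:
 *
 *      __le32 reg;
 *      rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
 *                                    USB_VENDOR_REQUEST_IN, offset,
 *                                    &reg, sizeof(reg), REGISTER_TIMEOUT);
 */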
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
                                   const u8 request, const u8 requesttype,
                                   const u16 offset, void *buffer,
                                   const u16 buffer_length, const int timeout)
{
        int status;

        BUG_ON(!mutex_is_locked(&rt2x00dev->usb_cache_mutex));

        /*
         * Check for Cache availability.
         */
        if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
                ERROR(rt2x00dev, "CSR cache not available.\n");
                return -ENOMEM;
        }

        if (requesttype == USB_VENDOR_REQUEST_OUT)
                memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

        status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
                                          offset, 0, rt2x00dev->csr.cache,
                                          buffer_length, timeout);

        if (!status && requesttype == USB_VENDOR_REQUEST_IN)
                memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
                                  const u8 request, const u8 requesttype,
                                  const u16 offset, void *buffer,
                                  const u16 buffer_length, const int timeout)
{
        int status;

        mutex_lock(&rt2x00dev->usb_cache_mutex);

        status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
                                                requesttype, offset, buffer,
                                                buffer_length, timeout);

        mutex_unlock(&rt2x00dev->usb_cache_mutex);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

/*
 * TX data handlers.
 */
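
/*
 * rt2x00usb_interrupt_txdone() is the completion handler of the bulk-out
 * URBs submitted by rt2x00usb_write_tx_data() below, so it runs in
 * completion (interrupt) context. It bails out when the radio has been
 * disabled or the entry is no longer owned by the device, strips the
 * hardware descriptor from the front of the skb, reports the result to
 * rt2x00lib_txdone() (TX_SUCCESS unless the URB itself failed), releases
 * the entry and, if the queue had filled up, re-enables the corresponding
 * mac80211 queue.
 */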
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
        struct txdone_entry_desc txdesc;
        __le32 *txd = (__le32 *)entry->skb->data;
        u32 word;

        if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
            !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        rt2x00_desc_read(txd, 0, &word);

        /*
         * Remove the descriptor data from the buffer.
         */
        skb_pull(entry->skb, entry->queue->desc_size);

        /*
         * Obtain the status about this packet.
         */
        txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
        txdesc.retry = 0;
        txdesc.control = &priv_tx->control;

        rt2x00lib_txdone(entry, &txdesc);

        /*
         * Make this entry available for reuse.
         */
        entry->flags = 0;
        rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

        /*
         * If the data queue was full before the txdone handler
         * we must make sure the packet queue in the mac80211 stack
         * is reenabled when the txdone handler has finished.
         */
        if (!rt2x00queue_full(entry->queue))
                ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
}
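
/*
 * rt2x00usb_write_tx_data() queues a single frame for transmission. It
 * refuses frames when the queue is full or the current entry is still
 * owned by the device, prepends the (zeroed) hardware TX descriptor to
 * the skb, lets rt2x00lib fill it in, asks the chipset driver for the
 * actual transfer length and finally submits a bulk-out URB on endpoint 1
 * with rt2x00usb_interrupt_txdone() as completion handler.
 */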
int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
                            struct data_queue *queue, struct sk_buff *skb,
                            struct ieee80211_tx_control *control)
{
        struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
        struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
        struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
        struct skb_frame_desc *skbdesc;
        u32 length;

        if (rt2x00queue_full(queue))
                return -EINVAL;

        if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
                ERROR(rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      control->queue, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Add the descriptor in front of the skb.
         */
        skb_push(skb, queue->desc_size);
        memset(skb->data, 0, queue->desc_size);

        /*
         * Fill in skb descriptor.
         */
        skbdesc = get_skb_frame_desc(skb);
        skbdesc->data = skb->data + queue->desc_size;
        skbdesc->data_len = skb->len - queue->desc_size;
        skbdesc->desc = skb->data;
        skbdesc->desc_len = queue->desc_size;
        skbdesc->entry = entry;

        memcpy(&priv_tx->control, control, sizeof(priv_tx->control));
        rt2x00lib_write_tx_desc(rt2x00dev, skb, control);

        /*
         * USB devices cannot blindly pass the skb->len as the
         * length of the data to usb_fill_bulk_urb. Pass the skb
         * to the driver to determine what the length should be.
         */
        length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, skb);

        /*
         * Initialize URB and send the frame to the device.
         */
        __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
        usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
                          skb->data, length, rt2x00usb_interrupt_txdone, entry);
        usb_submit_urb(priv_tx->urb, GFP_ATOMIC);

        rt2x00queue_index_inc(queue, Q_INDEX);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);

/*
 * RX data handlers.
 */
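
/*
 * rt2x00usb_alloc_rxskb() allocates a receive buffer big enough to hold a
 * full frame plus the hardware RX descriptor. On top of that it reserves
 * desc_size + 2 bytes of headroom: the 2 extra bytes are consumed later
 * by the alignment fixup in rt2x00usb_interrupt_rxdone().
 */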
static struct sk_buff *rt2x00usb_alloc_rxskb(struct data_queue *queue)
{
        struct sk_buff *skb;
        unsigned int frame_size;

        /*
         * As alignment we use 2 and not NET_IP_ALIGN because we need
         * to be sure we have 2 bytes room in the head (NET_IP_ALIGN
         * can be 0 on some hardware). We use these 2 bytes for frame
         * alignment later; we assume that the chance that
         * header_size % 4 == 2 is bigger than the chance that
         * header_size % 4 == 0, and thus optimize alignment by
         * reserving the 2 bytes in advance.
         */
        frame_size = queue->data_size + queue->desc_size;
        skb = dev_alloc_skb(queue->desc_size + frame_size + 2);
        if (!skb)
                return NULL;

        skb_reserve(skb, queue->desc_size + 2);
        skb_put(skb, frame_size);

        return skb;
}
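
/*
 * rt2x00usb_interrupt_rxdone() is the completion handler of the bulk-in
 * URBs armed in rt2x00usb_init_rxentry(). Frames that are shorter than
 * the hardware descriptor, or whose URB reported an error, are skipped.
 * Otherwise the chipset driver fills the rxdone descriptor, the payload
 * behind the ieee80211 header is realigned to a 4 byte boundary, a
 * replacement skb is allocated and the frame is handed to
 * rt2x00lib_rxdone(). The URB is resubmitted as long as the radio is
 * still enabled.
 *
 * Alignment example (illustrative, assuming the frame starts 2 bytes off
 * a 4 byte boundary): a 24 byte 802.11 header (24 % 4 == 0) leaves the
 * payload 2 bytes off a 4 byte boundary, so the whole frame is moved
 * 2 bytes towards the head using the headroom reserved in
 * rt2x00usb_alloc_rxskb(); a 26 byte QoS header (26 % 4 == 2) already
 * leaves the payload aligned and the frame is left in place.
 */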
static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct sk_buff *skb;
        struct skb_frame_desc *skbdesc;
        struct rxdone_entry_desc rxdesc;
        int header_size;

        if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
            !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        /*
         * Check if the received data is simply too small
         * to be actually valid, or if the urb is signaling
         * a problem.
         */
        if (urb->actual_length < entry->queue->desc_size || urb->status)
                goto skip_entry;

        /*
         * Fill in skb descriptor.
         */
        skbdesc = get_skb_frame_desc(entry->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        memset(&rxdesc, 0, sizeof(rxdesc));
        rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

        /*
         * The data behind the ieee80211 header must be
         * aligned on a 4 byte boundary.
         */
        header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
        if (header_size % 4 == 0) {
                skb_push(entry->skb, 2);
                memmove(entry->skb->data, entry->skb->data + 2,
                        entry->skb->len - 2);
                skbdesc->data = entry->skb->data;
                skb_trim(entry->skb, entry->skb->len - 2);
        }

        /*
         * Allocate a new sk buffer to replace the current one.
         * If allocation fails, we should drop the current frame
         * so we can recycle the existing sk buffer for the new frame.
         */
        skb = rt2x00usb_alloc_rxskb(entry->queue);
        if (!skb)
                goto skip_entry;

        /*
         * Send the frame to rt2x00lib for further processing.
         */
        rt2x00lib_rxdone(entry, &rxdesc);

        /*
         * Replace current entry's skb with the newly allocated one,
         * and reinitialize the urb.
         */
        entry->skb = skb;
        urb->transfer_buffer = entry->skb->data;
        urb->transfer_buffer_length = entry->skb->len;

skip_entry:
        if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
                __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                usb_submit_urb(urb, GFP_ATOMIC);
        }

        rt2x00queue_index_inc(entry->queue, Q_INDEX);
}

/*
 * Radio handlers.
 */
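
/*
 * rt2x00usb_disable_radio() stops the receiver through the USB_RX_CONTROL
 * vendor request and then kills every URB that might still be in flight:
 * all RX and TX entries, the beacon URBs and their guardian URBs when the
 * driver requires a beacon guardian, and the ATIM queue entries when the
 * driver has an ATIM queue.
 */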
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
        struct queue_entry_priv_usb_rx *priv_rx;
        struct queue_entry_priv_usb_tx *priv_tx;
        struct queue_entry_priv_usb_bcn *priv_bcn;
        struct data_queue *queue;
        unsigned int i;

        rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000,
                                    REGISTER_TIMEOUT);

        /*
         * Cancel all queues.
         */
        for (i = 0; i < rt2x00dev->rx->limit; i++) {
                priv_rx = rt2x00dev->rx->entries[i].priv_data;
                usb_kill_urb(priv_rx->urb);
        }

        tx_queue_for_each(rt2x00dev, queue) {
                for (i = 0; i < queue->limit; i++) {
                        priv_tx = queue->entries[i].priv_data;
                        usb_kill_urb(priv_tx->urb);
                }
        }

        /*
         * Kill guardian urb (if required by driver).
         */
        if (!test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
                return;

        for (i = 0; i < rt2x00dev->bcn->limit; i++) {
                priv_bcn = rt2x00dev->bcn->entries[i].priv_data;
                usb_kill_urb(priv_bcn->urb);

                if (priv_bcn->guardian_urb)
                        usb_kill_urb(priv_bcn->guardian_urb);
        }

        if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
                return;

        for (i = 0; i < rt2x00dev->bcn[1].limit; i++) {
                priv_tx = rt2x00dev->bcn[1].entries[i].priv_data;
                usb_kill_urb(priv_tx->urb);
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
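
/*
 * rt2x00usb_init_rxentry() (re)arms a single RX entry: it points a bulk-in
 * URB on endpoint 1 at the entry's skb, marks the entry as owned by the
 * device and submits the URB with rt2x00usb_interrupt_rxdone() as
 * completion handler. rt2x00usb_init_txentry() only needs to clear the
 * entry flags.
 */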
void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
                            struct queue_entry *entry)
{
        struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
        struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data;

        usb_fill_bulk_urb(priv_rx->urb, usb_dev,
                          usb_rcvbulkpipe(usb_dev, 1),
                          entry->skb->data, entry->skb->len,
                          rt2x00usb_interrupt_rxdone, entry);

        __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
        usb_submit_urb(priv_rx->urb, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);

void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
                            struct queue_entry *entry)
{
        entry->flags = 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
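
/*
 * rt2x00usb_alloc_urb() allocates one URB for every entry in the queue.
 * Beacon entries additionally get a "guardian" URB when the driver has
 * set DRIVER_REQUIRE_BEACON_GUARD; rt2x00usb_free_urb() undoes this and
 * also frees any skb still attached to an entry.
 */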
static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
                               struct data_queue *queue)
{
        struct queue_entry_priv_usb_rx *priv_rx;
        struct queue_entry_priv_usb_tx *priv_tx;
        struct queue_entry_priv_usb_bcn *priv_bcn;
        struct urb *urb;
        unsigned int guardian =
            test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
        unsigned int i;

        /*
         * Allocate the URBs.
         */
        for (i = 0; i < queue->limit; i++) {
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb)
                        return -ENOMEM;

                if (queue->qid == QID_RX) {
                        priv_rx = queue->entries[i].priv_data;
                        priv_rx->urb = urb;
                } else if (queue->qid == QID_MGMT && guardian) {
                        priv_bcn = queue->entries[i].priv_data;
                        priv_bcn->urb = urb;

                        urb = usb_alloc_urb(0, GFP_KERNEL);
                        if (!urb)
                                return -ENOMEM;

                        priv_bcn->guardian_urb = urb;
                } else {
                        priv_tx = queue->entries[i].priv_data;
                        priv_tx->urb = urb;
                }
        }

        return 0;
}
static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
                               struct data_queue *queue)
{
        struct queue_entry_priv_usb_rx *priv_rx;
        struct queue_entry_priv_usb_tx *priv_tx;
        struct queue_entry_priv_usb_bcn *priv_bcn;
        struct urb *urb;
        unsigned int guardian =
            test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                if (queue->qid == QID_RX) {
                        priv_rx = queue->entries[i].priv_data;
                        urb = priv_rx->urb;
                } else if (queue->qid == QID_MGMT && guardian) {
                        priv_bcn = queue->entries[i].priv_data;

                        usb_kill_urb(priv_bcn->guardian_urb);
                        usb_free_urb(priv_bcn->guardian_urb);

                        urb = priv_bcn->urb;
                } else {
                        priv_tx = queue->entries[i].priv_data;
                        urb = priv_tx->urb;
                }

                usb_kill_urb(urb);
                usb_free_urb(urb);
                if (queue->entries[i].skb)
                        kfree_skb(queue->entries[i].skb);
        }
}
int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        struct sk_buff *skb;
        unsigned int entry_size;
        unsigned int i;
        int uninitialized_var(status);

        /*
         * Allocate DMA.
         */
        queue_for_each(rt2x00dev, queue) {
                status = rt2x00usb_alloc_urb(rt2x00dev, queue);
                if (status)
                        goto exit;
        }

        /*
         * For the RX queue, skb's should be allocated.
         */
        entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
        for (i = 0; i < rt2x00dev->rx->limit; i++) {
                skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
                if (!skb) {
                        /* Report the failure instead of the stale status. */
                        status = -ENOMEM;
                        goto exit;
                }
                rt2x00dev->rx->entries[i].skb = skb;
        }

        return 0;

exit:
        rt2x00usb_uninitialize(rt2x00dev);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        queue_for_each(rt2x00dev, queue)
                rt2x00usb_free_urb(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
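
/*
 * rt2x00usb_alloc_reg() and rt2x00usb_free_reg() manage the per-device
 * register buffers: the CSR cache used by the vendor request helpers
 * above, the EEPROM image and the RF register copy, the latter two sized
 * according to the driver's rt2x00_ops. They are also torn down and
 * rebuilt around suspend/resume below.
 */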
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rf);
        rt2x00dev->rf = NULL;

        kfree(rt2x00dev->eeprom);
        rt2x00dev->eeprom = NULL;

        kfree(rt2x00dev->csr.cache);
        rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
        rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
        if (!rt2x00dev->csr.cache)
                goto exit;

        rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
        if (!rt2x00dev->eeprom)
                goto exit;

        rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
        if (!rt2x00dev->rf)
                goto exit;

        return 0;

exit:
        ERROR_PROBE("Failed to allocate registers.\n");

        rt2x00usb_free_reg(rt2x00dev);

        return -ENOMEM;
}
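
/*
 * rt2x00usb_probe() is the usb_driver probe callback. It takes a reference
 * on the USB device, allocates the ieee80211_hw with the rt2x00_dev as
 * private data (the rt2x00_ops come from the usb_device_id driver_info),
 * initializes the CSR cache mutex and the bulk-out max packet size,
 * allocates the register buffers and finally hands the device to
 * rt2x00lib_probe_dev(). Errors unwind in reverse order.
 */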
int rt2x00usb_probe(struct usb_interface *usb_intf,
                    const struct usb_device_id *id)
{
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
        struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
        struct ieee80211_hw *hw;
        struct rt2x00_dev *rt2x00dev;
        int retval;

        usb_dev = usb_get_dev(usb_dev);

        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                ERROR_PROBE("Failed to allocate hardware.\n");
                retval = -ENOMEM;
                goto exit_put_device;
        }

        usb_set_intfdata(usb_intf, hw);

        rt2x00dev = hw->priv;
        rt2x00dev->dev = usb_intf;
        rt2x00dev->ops = ops;
        rt2x00dev->hw = hw;
        mutex_init(&rt2x00dev->usb_cache_mutex);

        rt2x00dev->usb_maxpacket =
            usb_maxpacket(usb_dev, usb_sndbulkpipe(usb_dev, 1), 1);
        if (!rt2x00dev->usb_maxpacket)
                rt2x00dev->usb_maxpacket = 1;

        retval = rt2x00usb_alloc_reg(rt2x00dev);
        if (retval)
                goto exit_free_device;

        retval = rt2x00lib_probe_dev(rt2x00dev);
        if (retval)
                goto exit_free_reg;

        return 0;

exit_free_reg:
        rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
        ieee80211_free_hw(hw);

exit_put_device:
        usb_put_dev(usb_dev);

        usb_set_intfdata(usb_intf, NULL);

        return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);
void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        /*
         * Free all allocated data.
         */
        rt2x00lib_remove_dev(rt2x00dev);
        rt2x00usb_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);

        /*
         * Free the USB device data.
         */
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);
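
/*
 * Power management: rt2x00usb_suspend() lets rt2x00lib shut the device
 * down, then frees the register buffers and drops the usb_dev reference
 * taken in rt2x00usb_probe(). rt2x00usb_resume() re-takes the reference,
 * reallocates the buffers and restarts the device via rt2x00lib_resume().
 */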
#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;
        int retval;

        retval = rt2x00lib_suspend(rt2x00dev, state);
        if (retval)
                return retval;

        rt2x00usb_free_reg(rt2x00dev);

        /*
         * Decrease usbdev refcount.
         */
        usb_put_dev(interface_to_usbdev(usb_intf));

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;
        int retval;

        usb_get_dev(interface_to_usbdev(usb_intf));

        retval = rt2x00usb_alloc_reg(rt2x00dev);
        if (retval)
                return retval;

        retval = rt2x00lib_resume(rt2x00dev);
        if (retval)
                goto exit_free_reg;

        return 0;

exit_free_reg:
        rt2x00usb_free_reg(rt2x00dev);

        return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");