hif_usb.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325
  1. /*
  2. * Copyright (c) 2010-2011 Atheros Communications Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <asm/unaligned.h>
  17. #include "htc.h"
/* identify firmware images */
/* AR7010-based (AR9280/AR9287 behind a USB bridge) dongles */
#define FIRMWARE_AR7010_1_1 "htc_7010.fw"
/* AR9271 single-chip solution */
#define FIRMWARE_AR9271 "htc_9271.fw"

MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
MODULE_FIRMWARE(FIRMWARE_AR9271);
  23. static struct usb_device_id ath9k_hif_usb_ids[] = {
  24. { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
  25. { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
  26. { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
  27. { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
  28. { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
  29. { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
  30. { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
  31. { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
  32. { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
  33. { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
  34. { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
  35. { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
  36. { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
  37. { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
  38. { USB_DEVICE(0x0cf3, 0x7015),
  39. .driver_info = AR9287_USB }, /* Atheros */
  40. { USB_DEVICE(0x1668, 0x1200),
  41. .driver_info = AR9287_USB }, /* Verizon */
  42. { USB_DEVICE(0x0cf3, 0x7010),
  43. .driver_info = AR9280_USB }, /* Atheros */
  44. { USB_DEVICE(0x0846, 0x9018),
  45. .driver_info = AR9280_USB }, /* Netgear WNDA3200 */
  46. { USB_DEVICE(0x083A, 0xA704),
  47. .driver_info = AR9280_USB }, /* SMC Networks */
  48. { USB_DEVICE(0x0411, 0x017f),
  49. .driver_info = AR9280_USB }, /* Sony UWA-BR100 */
  50. { USB_DEVICE(0x0cf3, 0x20ff),
  51. .driver_info = STORAGE_DEVICE },
  52. { },
  53. };
  54. MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
/* Forward declaration; caller must hold tx.tx_lock (see definition below). */
static int __hif_usb_tx(struct hif_device_usb *hif_dev);
  56. static void hif_usb_regout_cb(struct urb *urb)
  57. {
  58. struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
  59. switch (urb->status) {
  60. case 0:
  61. break;
  62. case -ENOENT:
  63. case -ECONNRESET:
  64. case -ENODEV:
  65. case -ESHUTDOWN:
  66. goto free;
  67. default:
  68. break;
  69. }
  70. if (cmd) {
  71. ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
  72. cmd->skb, true);
  73. kfree(cmd);
  74. }
  75. return;
  76. free:
  77. kfree_skb(cmd->skb);
  78. kfree(cmd);
  79. }
  80. static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
  81. struct sk_buff *skb)
  82. {
  83. struct urb *urb;
  84. struct cmd_buf *cmd;
  85. int ret = 0;
  86. urb = usb_alloc_urb(0, GFP_KERNEL);
  87. if (urb == NULL)
  88. return -ENOMEM;
  89. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  90. if (cmd == NULL) {
  91. usb_free_urb(urb);
  92. return -ENOMEM;
  93. }
  94. cmd->skb = skb;
  95. cmd->hif_dev = hif_dev;
  96. usb_fill_bulk_urb(urb, hif_dev->udev,
  97. usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
  98. skb->data, skb->len,
  99. hif_usb_regout_cb, cmd);
  100. usb_anchor_urb(urb, &hif_dev->regout_submitted);
  101. ret = usb_submit_urb(urb, GFP_KERNEL);
  102. if (ret) {
  103. usb_unanchor_urb(urb);
  104. kfree(cmd);
  105. }
  106. usb_free_urb(urb);
  107. return ret;
  108. }
/*
 * Completion handler for management/beacon frame URBs (sent outside
 * the aggregation pool by hif_usb_send_mgmt()).
 */
static void hif_usb_mgmt_cb(struct urb *urb)
{
	struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
	struct hif_device_usb *hif_dev;
	bool txok = true;

	if (!cmd || !cmd->skb || !cmd->hif_dev)
		return;

	hif_dev = cmd->hif_dev;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		txok = false;

		/*
		 * If the URBs are being flushed, no need to complete
		 * this packet.
		 */
		spin_lock(&hif_dev->tx.tx_lock);
		if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
			spin_unlock(&hif_dev->tx.tx_lock);
			dev_kfree_skb_any(cmd->skb);
			kfree(cmd);
			return;
		}
		spin_unlock(&hif_dev->tx.tx_lock);

		break;
	default:
		txok = false;
		break;
	}

	/* Strip the 4-byte stream header added by hif_usb_send_mgmt(). */
	skb_pull(cmd->skb, 4);
	ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
				  cmd->skb, txok);
	kfree(cmd);
}
  147. static int hif_usb_send_mgmt(struct hif_device_usb *hif_dev,
  148. struct sk_buff *skb)
  149. {
  150. struct urb *urb;
  151. struct cmd_buf *cmd;
  152. int ret = 0;
  153. __le16 *hdr;
  154. urb = usb_alloc_urb(0, GFP_ATOMIC);
  155. if (urb == NULL)
  156. return -ENOMEM;
  157. cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
  158. if (cmd == NULL) {
  159. usb_free_urb(urb);
  160. return -ENOMEM;
  161. }
  162. cmd->skb = skb;
  163. cmd->hif_dev = hif_dev;
  164. hdr = (__le16 *) skb_push(skb, 4);
  165. *hdr++ = cpu_to_le16(skb->len - 4);
  166. *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
  167. usb_fill_bulk_urb(urb, hif_dev->udev,
  168. usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
  169. skb->data, skb->len,
  170. hif_usb_mgmt_cb, cmd);
  171. usb_anchor_urb(urb, &hif_dev->mgmt_submitted);
  172. ret = usb_submit_urb(urb, GFP_ATOMIC);
  173. if (ret) {
  174. usb_unanchor_urb(urb);
  175. kfree(cmd);
  176. }
  177. usb_free_urb(urb);
  178. return ret;
  179. }
  180. static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
  181. struct sk_buff_head *list)
  182. {
  183. struct sk_buff *skb;
  184. while ((skb = __skb_dequeue(list)) != NULL) {
  185. dev_kfree_skb_any(skb);
  186. }
  187. }
  188. static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
  189. struct sk_buff_head *queue,
  190. bool txok)
  191. {
  192. struct sk_buff *skb;
  193. while ((skb = __skb_dequeue(queue)) != NULL) {
  194. ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
  195. skb, txok);
  196. if (txok)
  197. TX_STAT_INC(skb_success);
  198. else
  199. TX_STAT_INC(skb_failed);
  200. }
  201. }
/*
 * Completion handler for aggregated WLAN TX URBs.
 *
 * Completes all skbs packed into this tx_buf, returns the buffer to
 * the free pool and, unless TX has been stopped, immediately tries to
 * build and submit the next aggregate.
 */
static void hif_usb_tx_cb(struct urb *urb)
{
	struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
	struct hif_device_usb *hif_dev;
	bool txok = true;

	if (!tx_buf || !tx_buf->hif_dev)
		return;

	hif_dev = tx_buf->hif_dev;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		txok = false;

		/*
		 * If the URBs are being flushed, no need to add this
		 * URB to the free list.
		 */
		spin_lock(&hif_dev->tx.tx_lock);
		if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
			spin_unlock(&hif_dev->tx.tx_lock);
			ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
			return;
		}
		spin_unlock(&hif_dev->tx.tx_lock);

		break;
	default:
		txok = false;
		break;
	}

	ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, txok);

	/* Re-initialize the SKB queue */
	tx_buf->len = tx_buf->offset = 0;
	__skb_queue_head_init(&tx_buf->skb_queue);

	/* Add this TX buffer to the free list */
	spin_lock(&hif_dev->tx.tx_lock);
	list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
	hif_dev->tx.tx_buf_cnt++;
	if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
		__hif_usb_tx(hif_dev); /* Check for pending SKBs */
	TX_STAT_INC(buf_completed);
	spin_unlock(&hif_dev->tx.tx_lock);
}
/* TX lock has to be taken */
/*
 * Pack up to MAX_TX_AGGR_NUM queued skbs into one free tx_buf (each
 * frame prefixed with a 4-byte stream-mode header and padded to a
 * 4-byte boundary) and submit it as a single bulk URB.
 * Returns 0 if nothing was queued or submission succeeded.
 */
static int __hif_usb_tx(struct hif_device_usb *hif_dev)
{
	struct tx_buf *tx_buf = NULL;
	struct sk_buff *nskb = NULL;
	int ret = 0, i;
	u16 tx_skb_cnt = 0;
	u8 *buf;
	__le16 *hdr;

	if (hif_dev->tx.tx_skb_cnt == 0)
		return 0;

	/* Check if a free TX buffer is available */
	if (list_empty(&hif_dev->tx.tx_buf))
		return 0;

	tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
	list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
	hif_dev->tx.tx_buf_cnt--;

	tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);

	for (i = 0; i < tx_skb_cnt; i++) {
		nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);

		/* Should never be NULL */
		BUG_ON(!nskb);

		hif_dev->tx.tx_skb_cnt--;

		buf = tx_buf->buf;
		buf += tx_buf->offset;
		/* 4-byte stream header: payload length + stream-mode tag */
		hdr = (__le16 *)buf;
		*hdr++ = cpu_to_le16(nskb->len);
		*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);

		buf += 4;
		memcpy(buf, nskb->data, nskb->len);
		tx_buf->len = nskb->len + 4;

		/* Advance offset, rounded up to a 4-byte boundary,
		 * for every frame except the last one. */
		if (i < (tx_skb_cnt - 1))
			tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;

		/* For the last frame, len becomes the total URB length. */
		if (i == (tx_skb_cnt - 1))
			tx_buf->len += tx_buf->offset;

		__skb_queue_tail(&tx_buf->skb_queue, nskb);
		TX_STAT_INC(skb_queued);
	}

	usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
			  usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
			  tx_buf->buf, tx_buf->len,
			  hif_usb_tx_cb, tx_buf);

	ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
	if (ret) {
		/* Submission failed: complete the skbs as failed and
		 * put the buffer back on the free list. */
		tx_buf->len = tx_buf->offset = 0;
		ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, false);
		__skb_queue_head_init(&tx_buf->skb_queue);
		list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
		hif_dev->tx.tx_buf_cnt++;
	}

	if (!ret)
		TX_STAT_INC(buf_queued);

	return ret;
}
/*
 * Queue an skb for TX.  Management/beacon frames bypass the buffer
 * pool and go straight out via hif_usb_send_mgmt(); data frames are
 * queued and later aggregated by __hif_usb_tx().
 */
static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
{
	struct ath9k_htc_tx_ctl *tx_ctl;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);

	if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
		return -ENODEV;
	}

	/* Check if the max queue count has been reached */
	if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
		return -ENOMEM;
	}

	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);

	tx_ctl = HTC_SKB_CB(skb);

	/* Mgmt/Beacon frames don't use the TX buffer pool */
	if ((tx_ctl->type == ATH9K_HTC_MGMT) ||
	    (tx_ctl->type == ATH9K_HTC_BEACON)) {
		ret = hif_usb_send_mgmt(hif_dev, skb);
	}

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);

	if ((tx_ctl->type == ATH9K_HTC_NORMAL) ||
	    (tx_ctl->type == ATH9K_HTC_AMPDU)) {
		__skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
		hif_dev->tx.tx_skb_cnt++;
	}

	/* Check if AMPDUs have to be sent immediately */
	if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
	    (hif_dev->tx.tx_skb_cnt < 2)) {
		__hif_usb_tx(hif_dev);
	}

	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);

	return ret;
}
/* Transport start hook: mark the interface started and re-enable TX. */
static void hif_usb_start(void *hif_handle)
{
	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
	unsigned long flags;

	hif_dev->flags |= HIF_USB_START;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
	hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
}
/*
 * Transport stop hook: fail all queued-but-unsent skbs, block further
 * TX submissions via HIF_USB_TX_STOP, and cancel in-flight TX URBs.
 */
static void hif_usb_stop(void *hif_handle)
{
	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
	struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
	ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false);
	hif_dev->tx.tx_skb_cnt = 0;
	hif_dev->tx.flags |= HIF_USB_TX_STOP;
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);

	/* The pending URBs have to be canceled. */
	/*
	 * NOTE(review): tx.tx_pending is walked here without tx_lock while
	 * hif_usb_tx_cb() may concurrently move entries back to the free
	 * list under the lock — looks racy; confirm the locking rules for
	 * tx.tx_pending against the rest of the driver.
	 */
	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
				 &hif_dev->tx.tx_pending, list) {
		usb_kill_urb(tx_buf->urb);
	}

	usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
  363. static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
  364. {
  365. struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
  366. int ret = 0;
  367. switch (pipe_id) {
  368. case USB_WLAN_TX_PIPE:
  369. ret = hif_usb_send_tx(hif_dev, skb);
  370. break;
  371. case USB_REG_OUT_PIPE:
  372. ret = hif_usb_send_regout(hif_dev, skb);
  373. break;
  374. default:
  375. dev_err(&hif_dev->udev->dev,
  376. "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
  377. ret = -EINVAL;
  378. break;
  379. }
  380. return ret;
  381. }
  382. static inline bool check_index(struct sk_buff *skb, u8 idx)
  383. {
  384. struct ath9k_htc_tx_ctl *tx_ctl;
  385. tx_ctl = HTC_SKB_CB(skb);
  386. if ((tx_ctl->type == ATH9K_HTC_AMPDU) &&
  387. (tx_ctl->sta_idx == idx))
  388. return true;
  389. return false;
  390. }
/*
 * Remove all queued A-MPDU frames destined for station slot 'idx'
 * (e.g. on station removal) and complete them as failed.
 */
static void hif_usb_sta_drain(void *hif_handle, u8 idx)
{
	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);

	/* _safe walk: matching entries are unlinked during iteration */
	skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) {
		if (check_index(skb, idx)) {
			__skb_unlink(skb, &hif_dev->tx.tx_skb_queue);
			ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
						  skb, false);
			hif_dev->tx.tx_skb_cnt--;
			TX_STAT_INC(skb_failed);
		}
	}

	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
}
/* HIF operations exported to the ath9k_htc core for the USB transport. */
static struct ath9k_htc_hif hif_usb = {
	.transport = ATH9K_HIF_USB,
	.name = "ath9k_hif_usb",

	/* Control (register/WMI) pipes */
	.control_ul_pipe = USB_REG_OUT_PIPE,
	.control_dl_pipe = USB_REG_IN_PIPE,

	.start = hif_usb_start,
	.stop = hif_usb_stop,
	.sta_drain = hif_usb_sta_drain,
	.send = hif_usb_send,
};
  418. static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
  419. struct sk_buff *skb)
  420. {
  421. struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
  422. int index = 0, i = 0, len = skb->len;
  423. int rx_remain_len, rx_pkt_len;
  424. u16 pool_index = 0;
  425. u8 *ptr;
  426. spin_lock(&hif_dev->rx_lock);
  427. rx_remain_len = hif_dev->rx_remain_len;
  428. rx_pkt_len = hif_dev->rx_transfer_len;
  429. if (rx_remain_len != 0) {
  430. struct sk_buff *remain_skb = hif_dev->remain_skb;
  431. if (remain_skb) {
  432. ptr = (u8 *) remain_skb->data;
  433. index = rx_remain_len;
  434. rx_remain_len -= hif_dev->rx_pad_len;
  435. ptr += rx_pkt_len;
  436. memcpy(ptr, skb->data, rx_remain_len);
  437. rx_pkt_len += rx_remain_len;
  438. hif_dev->rx_remain_len = 0;
  439. skb_put(remain_skb, rx_pkt_len);
  440. skb_pool[pool_index++] = remain_skb;
  441. } else {
  442. index = rx_remain_len;
  443. }
  444. }
  445. spin_unlock(&hif_dev->rx_lock);
  446. while (index < len) {
  447. u16 pkt_len;
  448. u16 pkt_tag;
  449. u16 pad_len;
  450. int chk_idx;
  451. ptr = (u8 *) skb->data;
  452. pkt_len = get_unaligned_le16(ptr + index);
  453. pkt_tag = get_unaligned_le16(ptr + index + 2);
  454. if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
  455. RX_STAT_INC(skb_dropped);
  456. return;
  457. }
  458. pad_len = 4 - (pkt_len & 0x3);
  459. if (pad_len == 4)
  460. pad_len = 0;
  461. chk_idx = index;
  462. index = index + 4 + pkt_len + pad_len;
  463. if (index > MAX_RX_BUF_SIZE) {
  464. spin_lock(&hif_dev->rx_lock);
  465. hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
  466. hif_dev->rx_transfer_len =
  467. MAX_RX_BUF_SIZE - chk_idx - 4;
  468. hif_dev->rx_pad_len = pad_len;
  469. nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
  470. if (!nskb) {
  471. dev_err(&hif_dev->udev->dev,
  472. "ath9k_htc: RX memory allocation error\n");
  473. spin_unlock(&hif_dev->rx_lock);
  474. goto err;
  475. }
  476. skb_reserve(nskb, 32);
  477. RX_STAT_INC(skb_allocated);
  478. memcpy(nskb->data, &(skb->data[chk_idx+4]),
  479. hif_dev->rx_transfer_len);
  480. /* Record the buffer pointer */
  481. hif_dev->remain_skb = nskb;
  482. spin_unlock(&hif_dev->rx_lock);
  483. } else {
  484. nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
  485. if (!nskb) {
  486. dev_err(&hif_dev->udev->dev,
  487. "ath9k_htc: RX memory allocation error\n");
  488. goto err;
  489. }
  490. skb_reserve(nskb, 32);
  491. RX_STAT_INC(skb_allocated);
  492. memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
  493. skb_put(nskb, pkt_len);
  494. skb_pool[pool_index++] = nskb;
  495. }
  496. }
  497. err:
  498. for (i = 0; i < pool_index; i++) {
  499. ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
  500. skb_pool[i]->len, USB_WLAN_RX_PIPE);
  501. RX_STAT_INC(skb_completed);
  502. }
  503. }
/*
 * Completion handler for WLAN RX bulk URBs.  Valid payloads are handed
 * to the stream de-multiplexer, then the URB (with the same skb) is
 * resubmitted; on unlink/disconnect errors the skb is freed instead.
 */
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct hif_device_usb *hif_dev =
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
	int ret;

	if (!skb)
		return;

	if (!hif_dev)
		goto free;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		/* Transient error: skip processing, just resubmit. */
		goto resubmit;
	}

	if (likely(urb->actual_length != 0)) {
		skb_put(skb, urb->actual_length);
		ath9k_hif_usb_rx_stream(hif_dev, skb);
	}

resubmit:
	/* Reset the skb so the same buffer can be reused. */
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

	usb_anchor_urb(urb, &hif_dev->rx_submitted);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		usb_unanchor_urb(urb);
		goto free;
	}

	return;
free:
	kfree_skb(skb);
}
/*
 * Completion handler for REG_IN bulk URBs (register reads / WMI
 * events).  The received skb is consumed by ath9k_htc_rx_msg(), so a
 * fresh skb is attached to the URB before resubmitting.
 */
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct sk_buff *nskb;
	struct hif_device_usb *hif_dev =
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
	int ret;

	if (!skb)
		return;

	if (!hif_dev)
		goto free;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		/* Transient error: reuse the current skb and retry. */
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);
		goto resubmit;
	}

	if (likely(urb->actual_length != 0)) {
		skb_put(skb, urb->actual_length);

		/* Process the command first */
		ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
				 skb->len, USB_REG_IN_PIPE);

		/* The old skb is gone; allocate a replacement buffer. */
		nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
		if (!nskb) {
			dev_err(&hif_dev->udev->dev,
				"ath9k_htc: REG_IN memory allocation failure\n");
			urb->context = NULL;
			return;
		}

		usb_fill_bulk_urb(urb, hif_dev->udev,
				  usb_rcvbulkpipe(hif_dev->udev,
						  USB_REG_IN_PIPE),
				  nskb->data, MAX_REG_IN_BUF_SIZE,
				  ath9k_hif_usb_reg_in_cb, nskb);
	}

resubmit:
	usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		usb_unanchor_urb(urb);
		goto free;
	}

	return;
free:
	kfree_skb(skb);
	urb->context = NULL;
}
/*
 * Free the whole TX buffer pool: first the free-list buffers, then
 * (with HIF_USB_TX_FLUSH set so completion callbacks drop their skbs)
 * the pending ones, and finally any outstanding management URBs.
 */
static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
{
	struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
	unsigned long flags;

	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
				 &hif_dev->tx.tx_buf, list) {
		usb_kill_urb(tx_buf->urb);
		list_del(&tx_buf->list);
		usb_free_urb(tx_buf->urb);
		kfree(tx_buf->buf);
		kfree(tx_buf);
	}

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
	hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);

	/*
	 * NOTE(review): both list walks run without tx_lock while
	 * hif_usb_tx_cb() can move tx_bufs between tx_pending and tx_buf
	 * under the lock; verify callers guarantee TX is quiesced first.
	 */
	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
				 &hif_dev->tx.tx_pending, list) {
		usb_kill_urb(tx_buf->urb);
		list_del(&tx_buf->list);
		usb_free_urb(tx_buf->urb);
		kfree(tx_buf->buf);
		kfree(tx_buf);
	}

	usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
  621. static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
  622. {
  623. struct tx_buf *tx_buf;
  624. int i;
  625. INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
  626. INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
  627. spin_lock_init(&hif_dev->tx.tx_lock);
  628. __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
  629. init_usb_anchor(&hif_dev->mgmt_submitted);
  630. for (i = 0; i < MAX_TX_URB_NUM; i++) {
  631. tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
  632. if (!tx_buf)
  633. goto err;
  634. tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
  635. if (!tx_buf->buf)
  636. goto err;
  637. tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
  638. if (!tx_buf->urb)
  639. goto err;
  640. tx_buf->hif_dev = hif_dev;
  641. __skb_queue_head_init(&tx_buf->skb_queue);
  642. list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
  643. }
  644. hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
  645. return 0;
  646. err:
  647. if (tx_buf) {
  648. kfree(tx_buf->buf);
  649. kfree(tx_buf);
  650. }
  651. ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
  652. return -ENOMEM;
  653. }
/* Cancel all RX URBs; the anchor held their last reference, so this frees them. */
static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
	usb_kill_anchored_urbs(&hif_dev->rx_submitted);
}
/*
 * Allocate and submit MAX_RX_URB_NUM WLAN RX bulk URBs.  After
 * submission the local reference is dropped, so the rx_submitted
 * anchor holds the only long-lived reference to each URB.
 */
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
	struct urb *urb = NULL;
	struct sk_buff *skb = NULL;
	int i, ret;

	init_usb_anchor(&hif_dev->rx_submitted);
	spin_lock_init(&hif_dev->rx_lock);

	for (i = 0; i < MAX_RX_URB_NUM; i++) {

		/* Allocate URB */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (urb == NULL) {
			ret = -ENOMEM;
			goto err_urb;
		}

		/* Allocate buffer */
		skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto err_skb;
		}

		usb_fill_bulk_urb(urb, hif_dev->udev,
				  usb_rcvbulkpipe(hif_dev->udev,
						  USB_WLAN_RX_PIPE),
				  skb->data, MAX_RX_BUF_SIZE,
				  ath9k_hif_usb_rx_cb, skb);

		/* Anchor URB */
		usb_anchor_urb(urb, &hif_dev->rx_submitted);

		/* Submit URB */
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			usb_unanchor_urb(urb);
			goto err_submit;
		}

		/*
		 * Drop reference count.
		 * This ensures that the URB is freed when killing them.
		 */
		usb_free_urb(urb);
	}

	return 0;

err_submit:
	kfree_skb(skb);
err_skb:
	usb_free_urb(urb);
err_urb:
	ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
	return ret;
}
/* Cancel all REG_IN URBs; the anchor held their last reference, freeing them. */
static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
	usb_kill_anchored_urbs(&hif_dev->reg_in_submitted);
}
/*
 * Allocate and submit MAX_REG_IN_URB_NUM REG_IN bulk URBs — the
 * mirror image of ath9k_hif_usb_alloc_rx_urbs() for the register
 * read / WMI event pipe.
 */
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
{
	struct urb *urb = NULL;
	struct sk_buff *skb = NULL;
	int i, ret;

	init_usb_anchor(&hif_dev->reg_in_submitted);

	for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {

		/* Allocate URB */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (urb == NULL) {
			ret = -ENOMEM;
			goto err_urb;
		}

		/* Allocate buffer */
		skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto err_skb;
		}

		usb_fill_bulk_urb(urb, hif_dev->udev,
				  usb_rcvbulkpipe(hif_dev->udev,
						  USB_REG_IN_PIPE),
				  skb->data, MAX_REG_IN_BUF_SIZE,
				  ath9k_hif_usb_reg_in_cb, skb);

		/* Anchor URB */
		usb_anchor_urb(urb, &hif_dev->reg_in_submitted);

		/* Submit URB */
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			usb_unanchor_urb(urb);
			goto err_submit;
		}

		/*
		 * Drop reference count.
		 * This ensures that the URB is freed when killing them.
		 */
		usb_free_urb(urb);
	}

	return 0;

err_submit:
	kfree_skb(skb);
err_skb:
	usb_free_urb(urb);
err_urb:
	ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
	return ret;
}
  757. static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
  758. {
  759. /* Register Write */
  760. init_usb_anchor(&hif_dev->regout_submitted);
  761. /* TX */
  762. if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
  763. goto err;
  764. /* RX */
  765. if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
  766. goto err_rx;
  767. /* Register Read */
  768. if (ath9k_hif_usb_alloc_reg_in_urbs(hif_dev) < 0)
  769. goto err_reg;
  770. return 0;
  771. err_reg:
  772. ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
  773. err_rx:
  774. ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
  775. err:
  776. return -ENOMEM;
  777. }
/* Tear down everything created by ath9k_hif_usb_alloc_urbs(). */
static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
	usb_kill_anchored_urbs(&hif_dev->regout_submitted);
	ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
	ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
	ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
}
/*
 * Push the firmware image to the target over EP0 in 4 KiB chunks and
 * then issue FIRMWARE_DOWNLOAD_COMP so the target starts executing it.
 * Returns 0 on success or a negative errno.
 */
static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
				     u32 drv_info)
{
	int transfer, err;
	const void *data = hif_dev->firmware->data;
	size_t len = hif_dev->firmware->size;
	u32 addr = AR9271_FIRMWARE;	/* target-side load address */
	u8 *buf = kzalloc(4096, GFP_KERNEL);
	u32 firm_offset;

	if (!buf)
		return -ENOMEM;

	while (len) {
		transfer = min_t(int, len, 4096);
		memcpy(buf, data, transfer);

		/* Vendor request; load address travels in wValue (>> 8). */
		err = usb_control_msg(hif_dev->udev,
				      usb_sndctrlpipe(hif_dev->udev, 0),
				      FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
				      addr >> 8, 0, buf, transfer, HZ);
		if (err < 0) {
			kfree(buf);
			return err;
		}

		len -= transfer;
		data += transfer;
		addr += transfer;
	}
	kfree(buf);

	/* The boot offset differs between AR7010 and AR9271 targets. */
	if (IS_AR7010_DEVICE(drv_info))
		firm_offset = AR7010_FIRMWARE_TEXT;
	else
		firm_offset = AR9271_FIRMWARE_TEXT;

	/*
	 * Issue FW download complete command to firmware.
	 */
	err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
			      FIRMWARE_DOWNLOAD_COMP,
			      0x40 | USB_DIR_OUT,
			      firm_offset >> 8, 0, NULL, 0, HZ);
	if (err)
		return -EIO;

	dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
		 hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);

	return 0;
}
/*
 * One-time device bring-up: request and download the firmware, patch
 * the interrupt endpoint descriptor(s) to bulk, and allocate all URBs.
 * Returns 0 on success; on failure hif_dev->firmware is left NULL.
 */
static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
{
	int ret, idx;
	struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
	struct usb_endpoint_descriptor *endp;

	/* Request firmware */
	ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name,
			       &hif_dev->udev->dev);
	if (ret) {
		dev_err(&hif_dev->udev->dev,
			"ath9k_htc: Firmware - %s not found\n", hif_dev->fw_name);
		goto err_fw_req;
	}

	/* Download firmware */
	ret = ath9k_hif_usb_download_fw(hif_dev, drv_info);
	if (ret) {
		dev_err(&hif_dev->udev->dev,
			"ath9k_htc: Firmware - %s download failed\n",
			hif_dev->fw_name);
		goto err_fw_download;
	}

	/* On downloading the firmware to the target, the USB descriptor of EP4
	 * is 'patched' to change the type of the endpoint to Bulk. This will
	 * bring down CPU usage during the scan period.
	 */
	for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
		endp = &alt->endpoint[idx].desc;
		if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				== USB_ENDPOINT_XFER_INT) {
			endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
			endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
			endp->bInterval = 0;
		}
	}

	/* Alloc URBs */
	ret = ath9k_hif_usb_alloc_urbs(hif_dev);
	if (ret) {
		dev_err(&hif_dev->udev->dev,
			"ath9k_htc: Unable to allocate URBs\n");
		goto err_fw_download;
	}

	return 0;

err_fw_download:
	release_firmware(hif_dev->firmware);
err_fw_req:
	hif_dev->firmware = NULL;
	return ret;
}
  877. static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
  878. {
  879. ath9k_hif_usb_dealloc_urbs(hif_dev);
  880. if (hif_dev->firmware)
  881. release_firmware(hif_dev->firmware);
  882. }
  883. /*
  884. * An exact copy of the function from zd1211rw.
  885. */
  886. static int send_eject_command(struct usb_interface *interface)
  887. {
  888. struct usb_device *udev = interface_to_usbdev(interface);
  889. struct usb_host_interface *iface_desc = &interface->altsetting[0];
  890. struct usb_endpoint_descriptor *endpoint;
  891. unsigned char *cmd;
  892. u8 bulk_out_ep;
  893. int r;
  894. /* Find bulk out endpoint */
  895. for (r = 1; r >= 0; r--) {
  896. endpoint = &iface_desc->endpoint[r].desc;
  897. if (usb_endpoint_dir_out(endpoint) &&
  898. usb_endpoint_xfer_bulk(endpoint)) {
  899. bulk_out_ep = endpoint->bEndpointAddress;
  900. break;
  901. }
  902. }
  903. if (r == -1) {
  904. dev_err(&udev->dev,
  905. "ath9k_htc: Could not find bulk out endpoint\n");
  906. return -ENODEV;
  907. }
  908. cmd = kzalloc(31, GFP_KERNEL);
  909. if (cmd == NULL)
  910. return -ENODEV;
  911. /* USB bulk command block */
  912. cmd[0] = 0x55; /* bulk command signature */
  913. cmd[1] = 0x53; /* bulk command signature */
  914. cmd[2] = 0x42; /* bulk command signature */
  915. cmd[3] = 0x43; /* bulk command signature */
  916. cmd[14] = 6; /* command length */
  917. cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
  918. cmd[19] = 0x2; /* eject disc */
  919. dev_info(&udev->dev, "Ejecting storage device...\n");
  920. r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
  921. cmd, 31, NULL, 2000);
  922. kfree(cmd);
  923. if (r)
  924. return r;
  925. /* At this point, the device disconnects and reconnects with the real
  926. * ID numbers. */
  927. usb_set_intfdata(interface, NULL);
  928. return 0;
  929. }
  930. static int ath9k_hif_usb_probe(struct usb_interface *interface,
  931. const struct usb_device_id *id)
  932. {
  933. struct usb_device *udev = interface_to_usbdev(interface);
  934. struct hif_device_usb *hif_dev;
  935. int ret = 0;
  936. if (id->driver_info == STORAGE_DEVICE)
  937. return send_eject_command(interface);
  938. hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
  939. if (!hif_dev) {
  940. ret = -ENOMEM;
  941. goto err_alloc;
  942. }
  943. usb_get_dev(udev);
  944. hif_dev->udev = udev;
  945. hif_dev->interface = interface;
  946. hif_dev->device_id = id->idProduct;
  947. #ifdef CONFIG_PM
  948. udev->reset_resume = 1;
  949. #endif
  950. usb_set_intfdata(interface, hif_dev);
  951. hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
  952. &hif_dev->udev->dev);
  953. if (hif_dev->htc_handle == NULL) {
  954. ret = -ENOMEM;
  955. goto err_htc_hw_alloc;
  956. }
  957. /* Find out which firmware to load */
  958. if (IS_AR7010_DEVICE(id->driver_info))
  959. hif_dev->fw_name = FIRMWARE_AR7010_1_1;
  960. else
  961. hif_dev->fw_name = FIRMWARE_AR9271;
  962. ret = ath9k_hif_usb_dev_init(hif_dev, id->driver_info);
  963. if (ret) {
  964. ret = -EINVAL;
  965. goto err_hif_init_usb;
  966. }
  967. ret = ath9k_htc_hw_init(hif_dev->htc_handle,
  968. &interface->dev, hif_dev->device_id,
  969. hif_dev->udev->product, id->driver_info);
  970. if (ret) {
  971. ret = -EINVAL;
  972. goto err_htc_hw_init;
  973. }
  974. dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");
  975. return 0;
  976. err_htc_hw_init:
  977. ath9k_hif_usb_dev_deinit(hif_dev);
  978. err_hif_init_usb:
  979. ath9k_htc_hw_free(hif_dev->htc_handle);
  980. err_htc_hw_alloc:
  981. usb_set_intfdata(interface, NULL);
  982. kfree(hif_dev);
  983. usb_put_dev(udev);
  984. err_alloc:
  985. return ret;
  986. }
  987. static void ath9k_hif_usb_reboot(struct usb_device *udev)
  988. {
  989. u32 reboot_cmd = 0xffffffff;
  990. void *buf;
  991. int ret;
  992. buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL);
  993. if (!buf)
  994. return;
  995. ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
  996. buf, 4, NULL, HZ);
  997. if (ret)
  998. dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
  999. kfree(buf);
  1000. }
  1001. static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
  1002. {
  1003. struct usb_device *udev = interface_to_usbdev(interface);
  1004. struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
  1005. bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
  1006. if (!hif_dev)
  1007. return;
  1008. ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
  1009. ath9k_htc_hw_free(hif_dev->htc_handle);
  1010. ath9k_hif_usb_dev_deinit(hif_dev);
  1011. usb_set_intfdata(interface, NULL);
  1012. if (!unplugged && (hif_dev->flags & HIF_USB_START))
  1013. ath9k_hif_usb_reboot(udev);
  1014. kfree(hif_dev);
  1015. dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
  1016. usb_put_dev(udev);
  1017. }
  1018. #ifdef CONFIG_PM
  1019. static int ath9k_hif_usb_suspend(struct usb_interface *interface,
  1020. pm_message_t message)
  1021. {
  1022. struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
  1023. /*
  1024. * The device has to be set to FULLSLEEP mode in case no
  1025. * interface is up.
  1026. */
  1027. if (!(hif_dev->flags & HIF_USB_START))
  1028. ath9k_htc_suspend(hif_dev->htc_handle);
  1029. ath9k_hif_usb_dealloc_urbs(hif_dev);
  1030. return 0;
  1031. }
  1032. static int ath9k_hif_usb_resume(struct usb_interface *interface)
  1033. {
  1034. struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
  1035. struct htc_target *htc_handle = hif_dev->htc_handle;
  1036. int ret;
  1037. ret = ath9k_hif_usb_alloc_urbs(hif_dev);
  1038. if (ret)
  1039. return ret;
  1040. if (hif_dev->firmware) {
  1041. ret = ath9k_hif_usb_download_fw(hif_dev,
  1042. htc_handle->drv_priv->ah->hw_version.usbdev);
  1043. if (ret)
  1044. goto fail_resume;
  1045. } else {
  1046. ath9k_hif_usb_dealloc_urbs(hif_dev);
  1047. return -EIO;
  1048. }
  1049. mdelay(100);
  1050. ret = ath9k_htc_resume(htc_handle);
  1051. if (ret)
  1052. goto fail_resume;
  1053. return 0;
  1054. fail_resume:
  1055. ath9k_hif_usb_dealloc_urbs(hif_dev);
  1056. return ret;
  1057. }
  1058. #endif
/* USB driver glue: entry points registered with the USB core. */
static struct usb_driver ath9k_hif_usb_driver = {
	.name = KBUILD_MODNAME,
	.probe = ath9k_hif_usb_probe,
	.disconnect = ath9k_hif_usb_disconnect,
#ifdef CONFIG_PM
	.suspend = ath9k_hif_usb_suspend,
	.resume = ath9k_hif_usb_resume,
	/* Firmware is lost over suspend, so reset_resume re-runs resume. */
	.reset_resume = ath9k_hif_usb_resume,
#endif
	.id_table = ath9k_hif_usb_ids,
	/* Keep disconnect() callable with URBs still pending on unbind. */
	.soft_unbind = 1,
};
/* Register the USB driver with the core; returns usb_register()'s result. */
int ath9k_hif_usb_init(void)
{
	return usb_register(&ath9k_hif_usb_driver);
}
/* Unregister the USB driver on module exit. */
void ath9k_hif_usb_exit(void)
{
	usb_deregister(&ath9k_hif_usb_driver);
}