dhd_linux.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271
  1. /*
  2. * Copyright (c) 2010 Broadcom Corporation
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/module.h>
  19. #include <net/cfg80211.h>
  20. #include <net/rtnetlink.h>
  21. #include <brcmu_utils.h>
  22. #include <brcmu_wifi.h>
  23. #include "dhd.h"
  24. #include "dhd_bus.h"
  25. #include "dhd_proto.h"
  26. #include "dhd_dbg.h"
  27. #include "fwil_types.h"
  28. #include "p2p.h"
  29. #include "wl_cfg80211.h"
  30. #include "fwil.h"
  31. #include "fwsignal.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions */
/* byte offsets into the firmware-supplied reorder metadata blob */
#define BRCMF_RXREORDER_FLOWID_OFFSET		0
#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
#define BRCMF_RXREORDER_FLAGS_OFFSET		4
#define BRCMF_RXREORDER_CURIDX_OFFSET		6
#define BRCMF_RXREORDER_EXPIDX_OFFSET		8

/* flag bits carried at BRCMF_RXREORDER_FLAGS_OFFSET */
#define BRCMF_RXREORDER_DEL_FLOW		0x01
#define BRCMF_RXREORDER_FLUSH_ALL		0x02
#define BRCMF_RXREORDER_CURIDX_VALID		0x04
#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
#define BRCMF_RXREORDER_NEW_HOLE		0x10

/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
  57. char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
  58. {
  59. if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
  60. brcmf_err("ifidx %d out of range\n", ifidx);
  61. return "<if_bad>";
  62. }
  63. if (drvr->iflist[ifidx] == NULL) {
  64. brcmf_err("null i/f %d\n", ifidx);
  65. return "<if_null>";
  66. }
  67. if (drvr->iflist[ifidx]->ndev)
  68. return drvr->iflist[ifidx]->ndev->name;
  69. return "<if_none>";
  70. }
  71. static void _brcmf_set_multicast_list(struct work_struct *work)
  72. {
  73. struct brcmf_if *ifp;
  74. struct net_device *ndev;
  75. struct netdev_hw_addr *ha;
  76. u32 cmd_value, cnt;
  77. __le32 cnt_le;
  78. char *buf, *bufp;
  79. u32 buflen;
  80. s32 err;
  81. ifp = container_of(work, struct brcmf_if, multicast_work);
  82. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  83. ndev = ifp->ndev;
  84. /* Determine initial value of allmulti flag */
  85. cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
  86. /* Send down the multicast list first. */
  87. cnt = netdev_mc_count(ndev);
  88. buflen = sizeof(cnt) + (cnt * ETH_ALEN);
  89. buf = kmalloc(buflen, GFP_ATOMIC);
  90. if (!buf)
  91. return;
  92. bufp = buf;
  93. cnt_le = cpu_to_le32(cnt);
  94. memcpy(bufp, &cnt_le, sizeof(cnt_le));
  95. bufp += sizeof(cnt_le);
  96. netdev_for_each_mc_addr(ha, ndev) {
  97. if (!cnt)
  98. break;
  99. memcpy(bufp, ha->addr, ETH_ALEN);
  100. bufp += ETH_ALEN;
  101. cnt--;
  102. }
  103. err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
  104. if (err < 0) {
  105. brcmf_err("Setting mcast_list failed, %d\n", err);
  106. cmd_value = cnt ? true : cmd_value;
  107. }
  108. kfree(buf);
  109. /*
  110. * Now send the allmulti setting. This is based on the setting in the
  111. * net_device flags, but might be modified above to be turned on if we
  112. * were trying to set some addresses and dongle rejected it...
  113. */
  114. err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
  115. if (err < 0)
  116. brcmf_err("Setting allmulti failed, %d\n", err);
  117. /*Finally, pick up the PROMISC flag */
  118. cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
  119. err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
  120. if (err < 0)
  121. brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
  122. err);
  123. }
  124. static void
  125. _brcmf_set_mac_address(struct work_struct *work)
  126. {
  127. struct brcmf_if *ifp;
  128. s32 err;
  129. ifp = container_of(work, struct brcmf_if, setmacaddr_work);
  130. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  131. err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
  132. ETH_ALEN);
  133. if (err < 0) {
  134. brcmf_err("Setting cur_etheraddr failed, %d\n", err);
  135. } else {
  136. brcmf_dbg(TRACE, "MAC address updated to %pM\n",
  137. ifp->mac_addr);
  138. memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  139. }
  140. }
/* ndo_set_mac_address: stash the requested address and defer the actual
 * firmware update to the setmacaddr worker (can't block in this context).
 */
static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct sockaddr *sa = (struct sockaddr *)addr;

	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
	schedule_work(&ifp->setmacaddr_work);
	return 0;
}
/* ndo_set_rx_mode: defer multicast/promisc reprogramming to the worker. */
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}
  154. static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
  155. struct net_device *ndev)
  156. {
  157. int ret;
  158. struct brcmf_if *ifp = netdev_priv(ndev);
  159. struct brcmf_pub *drvr = ifp->drvr;
  160. struct ethhdr *eh;
  161. brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
  162. /* Can the device send data? */
  163. if (drvr->bus_if->state != BRCMF_BUS_DATA) {
  164. brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
  165. netif_stop_queue(ndev);
  166. dev_kfree_skb(skb);
  167. ret = -ENODEV;
  168. goto done;
  169. }
  170. if (!drvr->iflist[ifp->bssidx]) {
  171. brcmf_err("bad ifidx %d\n", ifp->bssidx);
  172. netif_stop_queue(ndev);
  173. dev_kfree_skb(skb);
  174. ret = -ENODEV;
  175. goto done;
  176. }
  177. /* Make sure there's enough room for any header */
  178. if (skb_headroom(skb) < drvr->hdrlen) {
  179. struct sk_buff *skb2;
  180. brcmf_dbg(INFO, "%s: insufficient headroom\n",
  181. brcmf_ifname(drvr, ifp->bssidx));
  182. drvr->bus_if->tx_realloc++;
  183. skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
  184. dev_kfree_skb(skb);
  185. skb = skb2;
  186. if (skb == NULL) {
  187. brcmf_err("%s: skb_realloc_headroom failed\n",
  188. brcmf_ifname(drvr, ifp->bssidx));
  189. ret = -ENOMEM;
  190. goto done;
  191. }
  192. }
  193. /* validate length for ether packet */
  194. if (skb->len < sizeof(*eh)) {
  195. ret = -EINVAL;
  196. dev_kfree_skb(skb);
  197. goto done;
  198. }
  199. ret = brcmf_fws_process_skb(ifp, skb);
  200. done:
  201. if (ret) {
  202. ifp->stats.tx_dropped++;
  203. } else {
  204. ifp->stats.tx_packets++;
  205. ifp->stats.tx_bytes += skb->len;
  206. }
  207. /* Return ok: we always eat the packet */
  208. return NETDEV_TX_OK;
  209. }
/* Stop or wake the tx queue of @ifp for a given @reason.
 *
 * ifp->netif_stop is a bitmask of stop reasons: the queue is stopped
 * when the first bit gets set and only woken again when the last bit
 * clears.  Protected by netif_stop_lock (callable from irq context).
 */
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		/* stop the queue on the first reason bit only */
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		/* wake only once every reason has been cleared */
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
/* Bus-layer callback: forward the new bus tx flow-control state to the
 * firmware-signalling module.
 */
void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_fws_bus_blocked(drvr, state);
}
  237. static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
  238. {
  239. skb->dev = ifp->ndev;
  240. skb->protocol = eth_type_trans(skb, skb->dev);
  241. if (skb->pkt_type == PACKET_MULTICAST)
  242. ifp->stats.multicast++;
  243. /* Process special event packets */
  244. brcmf_fweh_process_skb(ifp->drvr, skb);
  245. if (!(ifp->ndev->flags & IFF_UP)) {
  246. brcmu_pkt_buf_free_skb(skb);
  247. return;
  248. }
  249. ifp->stats.rx_bytes += skb->len;
  250. ifp->stats.rx_packets++;
  251. brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
  252. if (in_interrupt())
  253. netif_rx(skb);
  254. else
  255. /* If the receive is not processed inside an ISR,
  256. * the softirqd must be woken explicitly to service
  257. * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
  258. */
  259. netif_rx_ni(skb);
  260. }
  261. static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
  262. u8 start, u8 end,
  263. struct sk_buff_head *skb_list)
  264. {
  265. /* initialize return list */
  266. __skb_queue_head_init(skb_list);
  267. if (rfi->pend_pkts == 0) {
  268. brcmf_dbg(INFO, "no packets in reorder queue\n");
  269. return;
  270. }
  271. do {
  272. if (rfi->pktslots[start]) {
  273. __skb_queue_tail(skb_list, rfi->pktslots[start]);
  274. rfi->pktslots[start] = NULL;
  275. }
  276. start++;
  277. if (start > rfi->max_idx)
  278. start = 0;
  279. } while (start != end);
  280. rfi->pend_pkts -= skb_queue_len(skb_list);
  281. }
/* Handle AMPDU rx reordering for @pkt on @ifp.
 *
 * @reorder_data is the firmware-supplied metadata blob laid out per the
 * BRCMF_RXREORDER_*_OFFSET defines.  Out-of-order packets are parked in
 * the per-flow pktslots ring until the sequence hole closes, then
 * released in order through brcmf_netif_rx().
 */
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		/* (exp_idx, exp_idx) drains the whole ring */
		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		/* the pktslots ring lives in the same allocation,
		 * directly behind the struct
		 */
		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi+1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		/* firmware opened a new hole: flush anything pending,
		 * then restart tracking at the indicated indices
		 */
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			/* if the new packet is exactly the one before the
			 * new expected index it can go straight out too
			 */
			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicity window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	/* hand every released packet up the stack, in order */
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}
  434. void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
  435. {
  436. struct sk_buff *skb, *pnext;
  437. struct brcmf_if *ifp;
  438. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  439. struct brcmf_pub *drvr = bus_if->drvr;
  440. struct brcmf_skb_reorder_data *rd;
  441. u8 ifidx;
  442. int ret;
  443. brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
  444. skb_queue_len(skb_list));
  445. skb_queue_walk_safe(skb_list, skb, pnext) {
  446. skb_unlink(skb, skb_list);
  447. /* process and remove protocol-specific header */
  448. ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
  449. ifp = drvr->iflist[ifidx];
  450. if (ret || !ifp || !ifp->ndev) {
  451. if ((ret != -ENODATA) && ifp)
  452. ifp->stats.rx_errors++;
  453. brcmu_pkt_buf_free_skb(skb);
  454. continue;
  455. }
  456. rd = (struct brcmf_skb_reorder_data *)skb->cb;
  457. if (rd->reorder)
  458. brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
  459. else
  460. brcmf_netif_rx(ifp, skb);
  461. }
  462. }
  463. void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
  464. bool success)
  465. {
  466. struct brcmf_if *ifp;
  467. struct ethhdr *eh;
  468. u8 ifidx;
  469. u16 type;
  470. int res;
  471. res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
  472. ifp = drvr->iflist[ifidx];
  473. if (!ifp)
  474. goto done;
  475. if (res == 0) {
  476. eh = (struct ethhdr *)(txp->data);
  477. type = ntohs(eh->h_proto);
  478. if (type == ETH_P_PAE) {
  479. atomic_dec(&ifp->pend_8021x_cnt);
  480. if (waitqueue_active(&ifp->pend_8021x_wait))
  481. wake_up(&ifp->pend_8021x_wait);
  482. }
  483. }
  484. if (!success)
  485. ifp->stats.tx_errors++;
  486. done:
  487. brcmu_pkt_buf_free_skb(txp);
  488. }
  489. void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
  490. {
  491. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  492. struct brcmf_pub *drvr = bus_if->drvr;
  493. /* await txstatus signal for firmware if active */
  494. if (brcmf_fws_fc_active(drvr->fws)) {
  495. if (!success)
  496. brcmf_fws_bustxfail(drvr->fws, txp);
  497. } else {
  498. brcmf_txfinalize(drvr, txp, success);
  499. }
  500. }
/* ndo_get_stats: stats are accumulated directly in the per-interface
 * struct, so just hand back a pointer to them.
 */
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	return &ifp->stats;
}
/*
 * Set current toe component enables in toe_ol iovar,
 * and set toe global enable iovar (on iff any component is enabled).
 * Returns 0 or a negative firmware-interface error.
 */
static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
{
	s32 err;

	err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
	if (err < 0) {
		brcmf_err("Setting toe_ol failed, %d\n", err);
		return err;
	}

	err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
	if (err < 0)
		brcmf_err("Setting toe failed, %d\n", err);

	return err;
}
/* ethtool get_drvinfo: report module name, numeric dongle driver
 * version and the bus device name.
 */
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				      struct ethtool_drvinfo *info)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	snprintf(info->version, sizeof(info->version), "%lu",
		 drvr->drv_version);
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
}
/* ethtool callbacks; everything beyond drvinfo goes through the
 * SIOCETHTOOL ioctl path (brcmf_ethtool).
 */
static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
  538. static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
  539. {
  540. struct brcmf_pub *drvr = ifp->drvr;
  541. struct ethtool_drvinfo info;
  542. char drvname[sizeof(info.driver)];
  543. u32 cmd;
  544. struct ethtool_value edata;
  545. u32 toe_cmpnt, csum_dir;
  546. int ret;
  547. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  548. /* all ethtool calls start with a cmd word */
  549. if (copy_from_user(&cmd, uaddr, sizeof(u32)))
  550. return -EFAULT;
  551. switch (cmd) {
  552. case ETHTOOL_GDRVINFO:
  553. /* Copy out any request driver name */
  554. if (copy_from_user(&info, uaddr, sizeof(info)))
  555. return -EFAULT;
  556. strncpy(drvname, info.driver, sizeof(info.driver));
  557. drvname[sizeof(info.driver) - 1] = '\0';
  558. /* clear struct for return */
  559. memset(&info, 0, sizeof(info));
  560. info.cmd = cmd;
  561. /* if requested, identify ourselves */
  562. if (strcmp(drvname, "?dhd") == 0) {
  563. sprintf(info.driver, "dhd");
  564. strcpy(info.version, BRCMF_VERSION_STR);
  565. }
  566. /* report dongle driver type */
  567. else
  568. sprintf(info.driver, "wl");
  569. sprintf(info.version, "%lu", drvr->drv_version);
  570. if (copy_to_user(uaddr, &info, sizeof(info)))
  571. return -EFAULT;
  572. brcmf_dbg(TRACE, "given %*s, returning %s\n",
  573. (int)sizeof(drvname), drvname, info.driver);
  574. break;
  575. /* Get toe offload components from dongle */
  576. case ETHTOOL_GRXCSUM:
  577. case ETHTOOL_GTXCSUM:
  578. ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
  579. if (ret < 0)
  580. return ret;
  581. csum_dir =
  582. (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
  583. edata.cmd = cmd;
  584. edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
  585. if (copy_to_user(uaddr, &edata, sizeof(edata)))
  586. return -EFAULT;
  587. break;
  588. /* Set toe offload components in dongle */
  589. case ETHTOOL_SRXCSUM:
  590. case ETHTOOL_STXCSUM:
  591. if (copy_from_user(&edata, uaddr, sizeof(edata)))
  592. return -EFAULT;
  593. /* Read the current settings, update and write back */
  594. ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
  595. if (ret < 0)
  596. return ret;
  597. csum_dir =
  598. (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
  599. if (edata.data != 0)
  600. toe_cmpnt |= csum_dir;
  601. else
  602. toe_cmpnt &= ~csum_dir;
  603. ret = brcmf_toe_set(ifp, toe_cmpnt);
  604. if (ret < 0)
  605. return ret;
  606. /* If setting TX checksum mode, tell Linux the new mode */
  607. if (cmd == ETHTOOL_STXCSUM) {
  608. if (edata.data)
  609. ifp->ndev->features |= NETIF_F_IP_CSUM;
  610. else
  611. ifp->ndev->features &= ~NETIF_F_IP_CSUM;
  612. }
  613. break;
  614. default:
  615. return -EOPNOTSUPP;
  616. }
  617. return 0;
  618. }
  619. static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
  620. int cmd)
  621. {
  622. struct brcmf_if *ifp = netdev_priv(ndev);
  623. struct brcmf_pub *drvr = ifp->drvr;
  624. brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
  625. if (!drvr->iflist[ifp->bssidx])
  626. return -1;
  627. if (cmd == SIOCETHTOOL)
  628. return brcmf_ethtool(ifp, ifr->ifr_data);
  629. return -EOPNOTSUPP;
  630. }
/* ndo_stop: take cfg80211 down, then halt OS transmissions. */
static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}
  640. static int brcmf_netdev_open(struct net_device *ndev)
  641. {
  642. struct brcmf_if *ifp = netdev_priv(ndev);
  643. struct brcmf_pub *drvr = ifp->drvr;
  644. struct brcmf_bus *bus_if = drvr->bus_if;
  645. u32 toe_ol;
  646. s32 ret = 0;
  647. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  648. /* If bus is not ready, can't continue */
  649. if (bus_if->state != BRCMF_BUS_DATA) {
  650. brcmf_err("failed bus is not ready\n");
  651. return -EAGAIN;
  652. }
  653. atomic_set(&ifp->pend_8021x_cnt, 0);
  654. /* Get current TOE mode from dongle */
  655. if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
  656. && (toe_ol & TOE_TX_CSUM_OL) != 0)
  657. ndev->features |= NETIF_F_IP_CSUM;
  658. else
  659. ndev->features &= ~NETIF_F_IP_CSUM;
  660. /* Allow transmit calls */
  661. netif_start_queue(ndev);
  662. if (brcmf_cfg80211_up(ndev)) {
  663. brcmf_err("failed to bring up cfg80211\n");
  664. return -1;
  665. }
  666. return ret;
  667. }
/* netdev callbacks for regular (non-P2P-device) interfaces */
static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_do_ioctl = brcmf_netdev_ioctl_entry,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
  677. int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
  678. {
  679. struct brcmf_pub *drvr = ifp->drvr;
  680. struct net_device *ndev;
  681. s32 err;
  682. brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
  683. ifp->mac_addr);
  684. ndev = ifp->ndev;
  685. /* set appropriate operations */
  686. ndev->netdev_ops = &brcmf_netdev_ops_pri;
  687. ndev->hard_header_len += drvr->hdrlen;
  688. ndev->ethtool_ops = &brcmf_ethtool_ops;
  689. drvr->rxsz = ndev->mtu + ndev->hard_header_len +
  690. drvr->hdrlen;
  691. /* set the mac address */
  692. memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  693. INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
  694. INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
  695. if (rtnl_locked)
  696. err = register_netdevice(ndev);
  697. else
  698. err = register_netdev(ndev);
  699. if (err != 0) {
  700. brcmf_err("couldn't register the net device\n");
  701. goto fail;
  702. }
  703. brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
  704. ndev->destructor = free_netdev;
  705. return 0;
  706. fail:
  707. drvr->iflist[ifp->bssidx] = NULL;
  708. ndev->netdev_ops = NULL;
  709. free_netdev(ndev);
  710. return -EBADE;
  711. }
/* ndo_open for the P2P device: just bring cfg80211 up. */
static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}
/* ndo_stop for the P2P device: just take cfg80211 down. */
static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}
/* ioctl handler for the P2P device: accepts every command, does nothing. */
static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
				  struct ifreq *ifr, int cmd)
{
	brcmf_dbg(TRACE, "Enter\n");

	return 0;
}
/* The P2P management interface never carries data frames: silently drop
 * anything handed to it and report success.
 */
static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
/* netdev callbacks for the P2P management interface */
static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
  741. static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
  742. {
  743. struct net_device *ndev;
  744. brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
  745. ifp->mac_addr);
  746. ndev = ifp->ndev;
  747. ndev->netdev_ops = &brcmf_netdev_ops_p2p;
  748. /* set the mac address */
  749. memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  750. if (register_netdev(ndev) != 0) {
  751. brcmf_err("couldn't register the p2p net device\n");
  752. goto fail;
  753. }
  754. brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
  755. return 0;
  756. fail:
  757. ifp->drvr->iflist[ifp->bssidx] = NULL;
  758. ndev->netdev_ops = NULL;
  759. free_netdev(ndev);
  760. return -EBADE;
  761. }
/* Allocate and initialize the interface structure for (@bssidx, @ifidx).
 *
 * For bssidx 1 with p2pon disabled this is the non-netdev P2P_DEVICE
 * interface; otherwise a net_device is allocated with the brcmf_if as
 * its private area.  Returns the new brcmf_if or an ERR_PTR.
 */
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			/* never tear down the primary interface here */
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}
/* Tear down the interface in slot @bssidx.
 *
 * For a netdev-backed interface the device is stopped (under RTNL for
 * the primary interface), pending works are cancelled, and
 * unregister_netdev() releases the netdev together with the embedded
 * brcmf_if.  A non-netdev interface (P2P_DEVICE) is simply kfree'd.
 * The primary interface (bssidx == 0) additionally detaches cfg80211.
 */
void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	/* Clear the slot first so nobody can pick up the dying interface. */
	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				/* brcmf_netdev_stop() touches netdev state,
				 * so hold the RTNL as the stack would. */
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}

		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
		if (bssidx == 0)
			brcmf_cfg80211_detach(drvr->config);
	} else {
		/* P2P_DEVICE style interface: no netdev, plain allocation. */
		kfree(ifp);
	}
}
  845. int brcmf_attach(uint bus_hdrlen, struct device *dev)
  846. {
  847. struct brcmf_pub *drvr = NULL;
  848. int ret = 0;
  849. brcmf_dbg(TRACE, "Enter\n");
  850. /* Allocate primary brcmf_info */
  851. drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
  852. if (!drvr)
  853. return -ENOMEM;
  854. mutex_init(&drvr->proto_block);
  855. /* Link to bus module */
  856. drvr->hdrlen = bus_hdrlen;
  857. drvr->bus_if = dev_get_drvdata(dev);
  858. drvr->bus_if->drvr = drvr;
  859. /* create device debugfs folder */
  860. brcmf_debugfs_attach(drvr);
  861. /* Attach and link in the protocol */
  862. ret = brcmf_proto_attach(drvr);
  863. if (ret != 0) {
  864. brcmf_err("brcmf_prot_attach failed\n");
  865. goto fail;
  866. }
  867. /* attach firmware event handler */
  868. brcmf_fweh_attach(drvr);
  869. INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
  870. return ret;
  871. fail:
  872. brcmf_detach(dev);
  873. return ret;
  874. }
/* Bring the bus up and create the networking interfaces.
 *
 * Sequence: init the bus, create the primary interface (and optionally
 * the P2P one), run pre-init dcmds, start firmware signalling, attach
 * cfg80211, activate firmware events, then register the netdevs.  On any
 * failure after interface creation, everything created so far is undone
 * here rather than relying on brcmf_detach().
 *
 * Returns 0 on success or a negative errno.
 */
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* Bring up the bus */
	ret = brcmf_bus_init(bus_if);
	if (ret != 0) {
		brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
		return ret;
	}

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	/* P2P interface creation is best-effort: a failure just means we
	 * continue without P2P. */
	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	bus_if->state = BRCMF_BUS_DATA;

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		if (drvr->config)
			brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		/* The interfaces were never registered, so free the
		 * netdevs directly (brcmf_if lives inside them). */
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	/* If the P2P netdev fails to register, fall back to non-P2P mode. */
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}
  942. static void brcmf_bus_detach(struct brcmf_pub *drvr)
  943. {
  944. brcmf_dbg(TRACE, "Enter\n");
  945. if (drvr) {
  946. /* Stop the protocol module */
  947. brcmf_proto_stop(drvr);
  948. /* Stop the bus module */
  949. brcmf_bus_stop(drvr->bus_if);
  950. }
  951. }
  952. void brcmf_dev_reset(struct device *dev)
  953. {
  954. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  955. struct brcmf_pub *drvr = bus_if->drvr;
  956. if (drvr == NULL)
  957. return;
  958. if (drvr->iflist[0])
  959. brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
  960. }
/* Full teardown of the driver instance attached to @dev.
 *
 * Order matters: stop event handling first, remove interfaces from the
 * highest index down so the primary interface goes last, then stop the
 * bus, detach the protocol, and free the driver structure.
 * Safe to call when attach never completed (drvr may be NULL, and
 * brcmf_proto_detach() is skipped if the protocol never attached).
 */
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
		if (drvr->iflist[i]) {
			brcmf_fws_del_interface(drvr->iflist[i]);
			brcmf_del_if(drvr, i);
		}

	brcmf_bus_detach(drvr);

	if (drvr->prot)
		brcmf_proto_detach(drvr);

	brcmf_fws_deinit(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}
  985. static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
  986. {
  987. return atomic_read(&ifp->pend_8021x_cnt);
  988. }
  989. int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
  990. {
  991. struct brcmf_if *ifp = netdev_priv(ndev);
  992. int err;
  993. err = wait_event_timeout(ifp->pend_8021x_wait,
  994. !brcmf_get_pend_8021x_cnt(ifp),
  995. msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
  996. WARN_ON(!err);
  997. return !err;
  998. }
  999. /*
  1000. * return chip id and rev of the device encoded in u32.
  1001. */
  1002. u32 brcmf_get_chip_info(struct brcmf_if *ifp)
  1003. {
  1004. struct brcmf_bus *bus = ifp->drvr->bus_if;
  1005. return bus->chip << 4 | bus->chiprev;
  1006. }
/* Work handler that registers the configured bus drivers (SDIO and/or
 * USB).  Run from a workqueue so module init does not block on it.
 */
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
}
/* Deferred bus-driver registration, scheduled from module init. */
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
/* Module entry point: set up debugfs and SDIO state, then defer the
 * actual bus-driver registration to a work item.
 *
 * NOTE(review): if schedule_work() refuses the work we return -EBUSY
 * without undoing brcmf_debugfs_init()/brcmf_sdio_init(); in practice
 * the work cannot already be queued at init time — confirm.
 */
static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}
/* Module exit: make sure the deferred registration work is not running,
 * then unwind the bus drivers and debugfs in reverse order of init.
 */
static void __exit brcmfmac_module_exit(void)
{
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
	brcmf_debugfs_exit();
}
/* Register the module entry/exit points with the kernel. */
module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);