/* dhd_linux.c — Broadcom fullmac (brcmfmac) Linux netdev glue */
  1. /*
  2. * Copyright (c) 2010 Broadcom Corporation
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/module.h>
  19. #include <net/cfg80211.h>
  20. #include <net/rtnetlink.h>
  21. #include <brcmu_utils.h>
  22. #include <brcmu_wifi.h>
  23. #include "dhd.h"
  24. #include "dhd_bus.h"
  25. #include "dhd_proto.h"
  26. #include "dhd_dbg.h"
  27. #include "fwil_types.h"
  28. #include "p2p.h"
  29. #include "wl_cfg80211.h"
  30. #include "fwil.h"
  31. #include "fwsignal.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

/* Maximum time to wait for pending 802.1x frames to drain */
#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions: byte offsets into the reorder metadata
 * blob attached to a received packet (see brcmf_rxreorder_process_info()).
 */
#define BRCMF_RXREORDER_FLOWID_OFFSET		0
#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
#define BRCMF_RXREORDER_FLAGS_OFFSET		4
#define BRCMF_RXREORDER_CURIDX_OFFSET		6
#define BRCMF_RXREORDER_EXPIDX_OFFSET		8

/* Flag bits carried at BRCMF_RXREORDER_FLAGS_OFFSET */
#define BRCMF_RXREORDER_DEL_FLOW		0x01
#define BRCMF_RXREORDER_FLUSH_ALL		0x02
#define BRCMF_RXREORDER_CURIDX_VALID		0x04
#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
#define BRCMF_RXREORDER_NEW_HOLE		0x10

/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
  57. char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
  58. {
  59. if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
  60. brcmf_err("ifidx %d out of range\n", ifidx);
  61. return "<if_bad>";
  62. }
  63. if (drvr->iflist[ifidx] == NULL) {
  64. brcmf_err("null i/f %d\n", ifidx);
  65. return "<if_null>";
  66. }
  67. if (drvr->iflist[ifidx]->ndev)
  68. return drvr->iflist[ifidx]->ndev->name;
  69. return "<if_none>";
  70. }
/* Worker: push the netdev's current multicast configuration down to the
 * dongle. Scheduled by brcmf_netdev_set_multicast_list(), so it runs in
 * process context where the firmware iovar calls may sleep.
 */
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	/* firmware buffer layout: __le32 count followed by cnt MAC addresses */
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	/* copy at most cnt addresses, even if the list grew meanwhile */
	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		/* NOTE(review): cnt is normally 0 after the copy loop above,
		 * so this allmulti fallback only triggers when the list
		 * shrank during iteration — verify this is the intent.
		 */
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/*Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}
  124. static void
  125. _brcmf_set_mac_address(struct work_struct *work)
  126. {
  127. struct brcmf_if *ifp;
  128. s32 err;
  129. ifp = container_of(work, struct brcmf_if, setmacaddr_work);
  130. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  131. err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
  132. ETH_ALEN);
  133. if (err < 0) {
  134. brcmf_err("Setting cur_etheraddr failed, %d\n", err);
  135. } else {
  136. brcmf_dbg(TRACE, "MAC address updated to %pM\n",
  137. ifp->mac_addr);
  138. memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  139. }
  140. }
  141. static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
  142. {
  143. struct brcmf_if *ifp = netdev_priv(ndev);
  144. struct sockaddr *sa = (struct sockaddr *)addr;
  145. memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
  146. schedule_work(&ifp->setmacaddr_work);
  147. return 0;
  148. }
/* .ndo_set_rx_mode: called in atomic context, so defer the firmware update
 * to the multicast worker.
 */
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}
  154. static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
  155. struct net_device *ndev)
  156. {
  157. int ret;
  158. struct brcmf_if *ifp = netdev_priv(ndev);
  159. struct brcmf_pub *drvr = ifp->drvr;
  160. struct ethhdr *eh;
  161. brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
  162. /* Can the device send data? */
  163. if (drvr->bus_if->state != BRCMF_BUS_DATA) {
  164. brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
  165. netif_stop_queue(ndev);
  166. dev_kfree_skb(skb);
  167. ret = -ENODEV;
  168. goto done;
  169. }
  170. if (!drvr->iflist[ifp->bssidx]) {
  171. brcmf_err("bad ifidx %d\n", ifp->bssidx);
  172. netif_stop_queue(ndev);
  173. dev_kfree_skb(skb);
  174. ret = -ENODEV;
  175. goto done;
  176. }
  177. /* Make sure there's enough room for any header */
  178. if (skb_headroom(skb) < drvr->hdrlen) {
  179. struct sk_buff *skb2;
  180. brcmf_dbg(INFO, "%s: insufficient headroom\n",
  181. brcmf_ifname(drvr, ifp->bssidx));
  182. drvr->bus_if->tx_realloc++;
  183. skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
  184. dev_kfree_skb(skb);
  185. skb = skb2;
  186. if (skb == NULL) {
  187. brcmf_err("%s: skb_realloc_headroom failed\n",
  188. brcmf_ifname(drvr, ifp->bssidx));
  189. ret = -ENOMEM;
  190. goto done;
  191. }
  192. }
  193. /* validate length for ether packet */
  194. if (skb->len < sizeof(*eh)) {
  195. ret = -EINVAL;
  196. dev_kfree_skb(skb);
  197. goto done;
  198. }
  199. ret = brcmf_fws_process_skb(ifp, skb);
  200. done:
  201. if (ret) {
  202. ifp->stats.tx_dropped++;
  203. } else {
  204. ifp->stats.tx_packets++;
  205. ifp->stats.tx_bytes += skb->len;
  206. }
  207. /* Return ok: we always eat the packet */
  208. return NETDEV_TX_OK;
  209. }
  210. void brcmf_txflowblock_if(struct brcmf_if *ifp,
  211. enum brcmf_netif_stop_reason reason, bool state)
  212. {
  213. unsigned long flags;
  214. if (!ifp || !ifp->ndev)
  215. return;
  216. brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
  217. ifp->bssidx, ifp->netif_stop, reason, state);
  218. spin_lock_irqsave(&ifp->netif_stop_lock, flags);
  219. if (state) {
  220. if (!ifp->netif_stop)
  221. netif_stop_queue(ifp->ndev);
  222. ifp->netif_stop |= reason;
  223. } else {
  224. ifp->netif_stop &= ~reason;
  225. if (!ifp->netif_stop)
  226. netif_wake_queue(ifp->ndev);
  227. }
  228. spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
  229. }
  230. void brcmf_txflowblock(struct device *dev, bool state)
  231. {
  232. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  233. struct brcmf_pub *drvr = bus_if->drvr;
  234. brcmf_dbg(TRACE, "Enter\n");
  235. brcmf_fws_bus_blocked(drvr, state);
  236. }
/* Deliver one received frame to the network stack. The skb is always
 * consumed: passed to netif_rx()/netif_rx_ni(), or freed when the
 * interface is down.
 */
static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	/* interface down: count nothing, drop the frame */
	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}
/* Collect the buffered packets in ring slots [start, end> of a reorder flow
 * onto @skb_list and clear the slots that were walked.
 *
 * Deliberately a do/while loop: calling with start == end walks the entire
 * ring once, which is how callers flush all pending packets of a flow.
 */
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		/* index wraps at max_idx */
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}
  282. static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
  283. struct sk_buff *pkt)
  284. {
  285. u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
  286. struct brcmf_ampdu_rx_reorder *rfi;
  287. struct sk_buff_head reorder_list;
  288. struct sk_buff *pnext;
  289. u8 flags;
  290. u32 buf_size;
  291. flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
  292. flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
  293. /* validate flags and flow id */
  294. if (flags == 0xFF) {
  295. brcmf_err("invalid flags...so ignore this packet\n");
  296. brcmf_netif_rx(ifp, pkt);
  297. return;
  298. }
  299. rfi = ifp->drvr->reorder_flows[flow_id];
  300. if (flags & BRCMF_RXREORDER_DEL_FLOW) {
  301. brcmf_dbg(INFO, "flow-%d: delete\n",
  302. flow_id);
  303. if (rfi == NULL) {
  304. brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
  305. flow_id);
  306. brcmf_netif_rx(ifp, pkt);
  307. return;
  308. }
  309. brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
  310. &reorder_list);
  311. /* add the last packet */
  312. __skb_queue_tail(&reorder_list, pkt);
  313. kfree(rfi);
  314. ifp->drvr->reorder_flows[flow_id] = NULL;
  315. goto netif_rx;
  316. }
  317. /* from here on we need a flow reorder instance */
  318. if (rfi == NULL) {
  319. buf_size = sizeof(*rfi);
  320. max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
  321. buf_size += (max_idx + 1) * sizeof(pkt);
  322. /* allocate space for flow reorder info */
  323. brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
  324. flow_id, max_idx);
  325. rfi = kzalloc(buf_size, GFP_ATOMIC);
  326. if (rfi == NULL) {
  327. brcmf_err("failed to alloc buffer\n");
  328. brcmf_netif_rx(ifp, pkt);
  329. return;
  330. }
  331. ifp->drvr->reorder_flows[flow_id] = rfi;
  332. rfi->pktslots = (struct sk_buff **)(rfi+1);
  333. rfi->max_idx = max_idx;
  334. }
  335. if (flags & BRCMF_RXREORDER_NEW_HOLE) {
  336. if (rfi->pend_pkts) {
  337. brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
  338. rfi->exp_idx,
  339. &reorder_list);
  340. WARN_ON(rfi->pend_pkts);
  341. } else {
  342. __skb_queue_head_init(&reorder_list);
  343. }
  344. rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
  345. rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
  346. rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
  347. rfi->pktslots[rfi->cur_idx] = pkt;
  348. rfi->pend_pkts++;
  349. brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
  350. flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
  351. } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
  352. cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
  353. exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
  354. if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
  355. /* still in the current hole */
  356. /* enqueue the current on the buffer chain */
  357. if (rfi->pktslots[cur_idx] != NULL) {
  358. brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
  359. brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
  360. rfi->pktslots[cur_idx] = NULL;
  361. }
  362. rfi->pktslots[cur_idx] = pkt;
  363. rfi->pend_pkts++;
  364. rfi->cur_idx = cur_idx;
  365. brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
  366. flow_id, cur_idx, exp_idx, rfi->pend_pkts);
  367. /* can return now as there is no reorder
  368. * list to process.
  369. */
  370. return;
  371. }
  372. if (rfi->exp_idx == cur_idx) {
  373. if (rfi->pktslots[cur_idx] != NULL) {
  374. brcmf_dbg(INFO, "error buffer pending..free it\n");
  375. brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
  376. rfi->pktslots[cur_idx] = NULL;
  377. }
  378. rfi->pktslots[cur_idx] = pkt;
  379. rfi->pend_pkts++;
  380. /* got the expected one. flush from current to expected
  381. * and update expected
  382. */
  383. brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
  384. flow_id, cur_idx, exp_idx, rfi->pend_pkts);
  385. rfi->cur_idx = cur_idx;
  386. rfi->exp_idx = exp_idx;
  387. brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
  388. &reorder_list);
  389. brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
  390. flow_id, skb_queue_len(&reorder_list),
  391. rfi->pend_pkts);
  392. } else {
  393. u8 end_idx;
  394. brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
  395. flow_id, flags, rfi->cur_idx, rfi->exp_idx,
  396. cur_idx, exp_idx);
  397. if (flags & BRCMF_RXREORDER_FLUSH_ALL)
  398. end_idx = rfi->exp_idx;
  399. else
  400. end_idx = exp_idx;
  401. /* flush pkts first */
  402. brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
  403. &reorder_list);
  404. if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
  405. __skb_queue_tail(&reorder_list, pkt);
  406. } else {
  407. rfi->pktslots[cur_idx] = pkt;
  408. rfi->pend_pkts++;
  409. }
  410. rfi->exp_idx = exp_idx;
  411. rfi->cur_idx = cur_idx;
  412. }
  413. } else {
  414. /* explicity window move updating the expected index */
  415. exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
  416. brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
  417. flow_id, flags, rfi->exp_idx, exp_idx);
  418. if (flags & BRCMF_RXREORDER_FLUSH_ALL)
  419. end_idx = rfi->exp_idx;
  420. else
  421. end_idx = exp_idx;
  422. brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
  423. &reorder_list);
  424. __skb_queue_tail(&reorder_list, pkt);
  425. /* set the new expected idx */
  426. rfi->exp_idx = exp_idx;
  427. }
  428. netif_rx:
  429. skb_queue_walk_safe(&reorder_list, pkt, pnext) {
  430. __skb_unlink(pkt, &reorder_list);
  431. brcmf_netif_rx(ifp, pkt);
  432. }
  433. }
  434. void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
  435. {
  436. struct brcmf_if *ifp;
  437. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  438. struct brcmf_pub *drvr = bus_if->drvr;
  439. struct brcmf_skb_reorder_data *rd;
  440. u8 ifidx;
  441. int ret;
  442. brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
  443. /* process and remove protocol-specific header */
  444. ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
  445. ifp = drvr->iflist[ifidx];
  446. if (ret || !ifp || !ifp->ndev) {
  447. if ((ret != -ENODATA) && ifp)
  448. ifp->stats.rx_errors++;
  449. brcmu_pkt_buf_free_skb(skb);
  450. return;
  451. }
  452. rd = (struct brcmf_skb_reorder_data *)skb->cb;
  453. if (rd->reorder)
  454. brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
  455. else
  456. brcmf_netif_rx(ifp, skb);
  457. }
  458. void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
  459. bool success)
  460. {
  461. struct brcmf_if *ifp;
  462. struct ethhdr *eh;
  463. u8 ifidx;
  464. u16 type;
  465. int res;
  466. res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
  467. ifp = drvr->iflist[ifidx];
  468. if (!ifp)
  469. goto done;
  470. if (res == 0) {
  471. eh = (struct ethhdr *)(txp->data);
  472. type = ntohs(eh->h_proto);
  473. if (type == ETH_P_PAE) {
  474. atomic_dec(&ifp->pend_8021x_cnt);
  475. if (waitqueue_active(&ifp->pend_8021x_wait))
  476. wake_up(&ifp->pend_8021x_wait);
  477. }
  478. }
  479. if (!success)
  480. ifp->stats.tx_errors++;
  481. done:
  482. brcmu_pkt_buf_free_skb(txp);
  483. }
  484. void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
  485. {
  486. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  487. struct brcmf_pub *drvr = bus_if->drvr;
  488. /* await txstatus signal for firmware if active */
  489. if (brcmf_fws_fc_active(drvr->fws)) {
  490. if (!success)
  491. brcmf_fws_bustxfail(drvr->fws, txp);
  492. } else {
  493. brcmf_txfinalize(drvr, txp, success);
  494. }
  495. }
  496. static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
  497. {
  498. struct brcmf_if *ifp = netdev_priv(ndev);
  499. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  500. return &ifp->stats;
  501. }
  502. /*
  503. * Set current toe component enables in toe_ol iovar,
  504. * and set toe global enable iovar
  505. */
  506. static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
  507. {
  508. s32 err;
  509. err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
  510. if (err < 0) {
  511. brcmf_err("Setting toe_ol failed, %d\n", err);
  512. return err;
  513. }
  514. err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
  515. if (err < 0)
  516. brcmf_err("Setting toe failed, %d\n", err);
  517. return err;
  518. }
  519. static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
  520. struct ethtool_drvinfo *info)
  521. {
  522. struct brcmf_if *ifp = netdev_priv(ndev);
  523. struct brcmf_pub *drvr = ifp->drvr;
  524. strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
  525. snprintf(info->version, sizeof(info->version), "%lu",
  526. drvr->drv_version);
  527. strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
  528. sizeof(info->bus_info));
  529. }
/* Only drvinfo is exposed via the standard ethtool ops; the remaining
 * ethtool commands are handled by brcmf_ethtool() through SIOCETHTOOL.
 */
static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
  533. static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
  534. {
  535. struct brcmf_pub *drvr = ifp->drvr;
  536. struct ethtool_drvinfo info;
  537. char drvname[sizeof(info.driver)];
  538. u32 cmd;
  539. struct ethtool_value edata;
  540. u32 toe_cmpnt, csum_dir;
  541. int ret;
  542. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  543. /* all ethtool calls start with a cmd word */
  544. if (copy_from_user(&cmd, uaddr, sizeof(u32)))
  545. return -EFAULT;
  546. switch (cmd) {
  547. case ETHTOOL_GDRVINFO:
  548. /* Copy out any request driver name */
  549. if (copy_from_user(&info, uaddr, sizeof(info)))
  550. return -EFAULT;
  551. strncpy(drvname, info.driver, sizeof(info.driver));
  552. drvname[sizeof(info.driver) - 1] = '\0';
  553. /* clear struct for return */
  554. memset(&info, 0, sizeof(info));
  555. info.cmd = cmd;
  556. /* if requested, identify ourselves */
  557. if (strcmp(drvname, "?dhd") == 0) {
  558. sprintf(info.driver, "dhd");
  559. strcpy(info.version, BRCMF_VERSION_STR);
  560. }
  561. /* report dongle driver type */
  562. else
  563. sprintf(info.driver, "wl");
  564. sprintf(info.version, "%lu", drvr->drv_version);
  565. if (copy_to_user(uaddr, &info, sizeof(info)))
  566. return -EFAULT;
  567. brcmf_dbg(TRACE, "given %*s, returning %s\n",
  568. (int)sizeof(drvname), drvname, info.driver);
  569. break;
  570. /* Get toe offload components from dongle */
  571. case ETHTOOL_GRXCSUM:
  572. case ETHTOOL_GTXCSUM:
  573. ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
  574. if (ret < 0)
  575. return ret;
  576. csum_dir =
  577. (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
  578. edata.cmd = cmd;
  579. edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
  580. if (copy_to_user(uaddr, &edata, sizeof(edata)))
  581. return -EFAULT;
  582. break;
  583. /* Set toe offload components in dongle */
  584. case ETHTOOL_SRXCSUM:
  585. case ETHTOOL_STXCSUM:
  586. if (copy_from_user(&edata, uaddr, sizeof(edata)))
  587. return -EFAULT;
  588. /* Read the current settings, update and write back */
  589. ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
  590. if (ret < 0)
  591. return ret;
  592. csum_dir =
  593. (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
  594. if (edata.data != 0)
  595. toe_cmpnt |= csum_dir;
  596. else
  597. toe_cmpnt &= ~csum_dir;
  598. ret = brcmf_toe_set(ifp, toe_cmpnt);
  599. if (ret < 0)
  600. return ret;
  601. /* If setting TX checksum mode, tell Linux the new mode */
  602. if (cmd == ETHTOOL_STXCSUM) {
  603. if (edata.data)
  604. ifp->ndev->features |= NETIF_F_IP_CSUM;
  605. else
  606. ifp->ndev->features &= ~NETIF_F_IP_CSUM;
  607. }
  608. break;
  609. default:
  610. return -EOPNOTSUPP;
  611. }
  612. return 0;
  613. }
  614. static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
  615. int cmd)
  616. {
  617. struct brcmf_if *ifp = netdev_priv(ndev);
  618. struct brcmf_pub *drvr = ifp->drvr;
  619. brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
  620. if (!drvr->iflist[ifp->bssidx])
  621. return -1;
  622. if (cmd == SIOCETHTOOL)
  623. return brcmf_ethtool(ifp, ifr->ifr_data);
  624. return -EOPNOTSUPP;
  625. }
  626. static int brcmf_netdev_stop(struct net_device *ndev)
  627. {
  628. struct brcmf_if *ifp = netdev_priv(ndev);
  629. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  630. brcmf_cfg80211_down(ndev);
  631. /* Set state and stop OS transmissions */
  632. netif_stop_queue(ndev);
  633. return 0;
  634. }
  635. static int brcmf_netdev_open(struct net_device *ndev)
  636. {
  637. struct brcmf_if *ifp = netdev_priv(ndev);
  638. struct brcmf_pub *drvr = ifp->drvr;
  639. struct brcmf_bus *bus_if = drvr->bus_if;
  640. u32 toe_ol;
  641. s32 ret = 0;
  642. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  643. /* If bus is not ready, can't continue */
  644. if (bus_if->state != BRCMF_BUS_DATA) {
  645. brcmf_err("failed bus is not ready\n");
  646. return -EAGAIN;
  647. }
  648. atomic_set(&ifp->pend_8021x_cnt, 0);
  649. /* Get current TOE mode from dongle */
  650. if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
  651. && (toe_ol & TOE_TX_CSUM_OL) != 0)
  652. ndev->features |= NETIF_F_IP_CSUM;
  653. else
  654. ndev->features &= ~NETIF_F_IP_CSUM;
  655. /* Allow transmit calls */
  656. netif_start_queue(ndev);
  657. if (brcmf_cfg80211_up(ndev)) {
  658. brcmf_err("failed to bring up cfg80211\n");
  659. return -1;
  660. }
  661. return ret;
  662. }
/* netdev callbacks for primary (fullmac) interfaces */
static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_do_ioctl = brcmf_netdev_ioctl_entry,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
/* Register the net_device of a primary interface with the kernel.
 *
 * @ifp: interface instance; ifp->ndev must already be allocated.
 * @rtnl_locked: true when the caller already holds the RTNL lock, in which
 *	case register_netdevice() is used instead of register_netdev().
 *
 * Returns 0 on success. On failure the netdev is freed here, the iflist
 * slot is cleared and -EBADE is returned.
 */
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	/* reserve room for the bus/protocol headers prepended on TX */
	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
			      drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	/* once registered, let the stack free the netdev on unregister */
	ndev->destructor = free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}
/* .ndo_open for the P2P device interface: just bring up cfg80211. */
static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}
/* .ndo_stop for the P2P device interface: just take cfg80211 down. */
static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}
/* .ndo_do_ioctl for the P2P device interface: no ioctls supported,
 * accept and ignore everything.
 */
static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
				  struct ifreq *ifr, int cmd)
{
	brcmf_dbg(TRACE, "Enter\n");

	return 0;
}
/* .ndo_start_xmit for the P2P device interface: data transmission is not
 * supported, so discard the frame silently.
 */
static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
/* Restricted netdev callbacks for the P2P device interface (no data path) */
static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
  736. static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
  737. {
  738. struct net_device *ndev;
  739. brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
  740. ifp->mac_addr);
  741. ndev = ifp->ndev;
  742. ndev->netdev_ops = &brcmf_netdev_ops_p2p;
  743. /* set the mac address */
  744. memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  745. if (register_netdev(ndev) != 0) {
  746. brcmf_err("couldn't register the p2p net device\n");
  747. goto fail;
  748. }
  749. brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
  750. return 0;
  751. fail:
  752. ifp->drvr->iflist[ifp->bssidx] = NULL;
  753. ndev->netdev_ops = NULL;
  754. free_netdev(ndev);
  755. return -EBADE;
  756. }
/* Create the driver-side instance for firmware BSS index @bssidx.
 *
 * @drvr: driver instance.
 * @bssidx: BSS index, also the iflist slot.
 * @ifidx: firmware interface index.
 * @name: netdev name template (unused for the non-netdev P2P device).
 * @mac_addr: initial MAC address, may be NULL.
 *
 * Returns the new brcmf_if (stored in drvr->iflist[bssidx]) or an
 * ERR_PTR(). The netdev, when allocated, is not registered here — that is
 * done later by brcmf_net_attach()/brcmf_net_p2p_attach().
 */
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			/* never tear down the primary interface here */
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}
/* brcmf_del_if() - tear down the interface at @bssidx.
 *
 * @drvr:   driver instance.
 * @bssidx: BSS index of the interface to remove.
 *
 * Interfaces created without a netdev (P2P_DEVICE) are simply kfree'd.
 * For netdev-backed interfaces the netdev is stopped, pending works are
 * cancelled and unregister_netdev() releases the memory (ifp is the
 * netdev's private area).  The statement order here is deliberate --
 * stop traffic first, cancel workers before the ops vanish, detach
 * cfg80211 only after the primary netdev is unregistered.
 */
void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				/* ndo_stop needs RTNL when called directly */
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			/* flush works that dereference ifp/ndev */
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
		if (bssidx == 0)
			brcmf_cfg80211_detach(drvr->config);
	} else {
		/* non-netdev (P2P_DEVICE) interface: plain allocation */
		kfree(ifp);
	}
}
  840. int brcmf_attach(uint bus_hdrlen, struct device *dev)
  841. {
  842. struct brcmf_pub *drvr = NULL;
  843. int ret = 0;
  844. brcmf_dbg(TRACE, "Enter\n");
  845. /* Allocate primary brcmf_info */
  846. drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
  847. if (!drvr)
  848. return -ENOMEM;
  849. mutex_init(&drvr->proto_block);
  850. /* Link to bus module */
  851. drvr->hdrlen = bus_hdrlen;
  852. drvr->bus_if = dev_get_drvdata(dev);
  853. drvr->bus_if->drvr = drvr;
  854. /* create device debugfs folder */
  855. brcmf_debugfs_attach(drvr);
  856. /* Attach and link in the protocol */
  857. ret = brcmf_proto_attach(drvr);
  858. if (ret != 0) {
  859. brcmf_err("brcmf_prot_attach failed\n");
  860. goto fail;
  861. }
  862. /* attach firmware event handler */
  863. brcmf_fweh_attach(drvr);
  864. INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
  865. return ret;
  866. fail:
  867. brcmf_detach(dev);
  868. return ret;
  869. }
/* brcmf_bus_start() - bring the bus up and create the network interfaces.
 *
 * @dev: bus device whose drvdata holds the brcmf_bus.
 *
 * Sequence: init the bus, add the primary ("wlan%d") interface and,
 * when enabled, the P2P ("p2p%d") interface, mark the bus ready, run
 * the preinit firmware commands, start firmware signalling, attach
 * cfg80211, activate firmware events, then register the primary netdev.
 * A P2P attach failure is non-fatal: it just disables P2P.
 *
 * Return: 0 on success, negative errno on failure.  On failure all
 * state built here is unwound in reverse (cfg80211, fws, netdevs).
 */
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* Bring up the bus */
	ret = brcmf_bus_init(bus_if);
	if (ret != 0) {
		brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
		return ret;
	}

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	/* a failed P2P interface is tolerated, not fatal */
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	bus_if->state = BRCMF_BUS_DATA;

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		if (drvr->config)
			brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		/* netdevs were never registered; free them directly */
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}
  937. static void brcmf_bus_detach(struct brcmf_pub *drvr)
  938. {
  939. brcmf_dbg(TRACE, "Enter\n");
  940. if (drvr) {
  941. /* Stop the protocol module */
  942. brcmf_proto_stop(drvr);
  943. /* Stop the bus module */
  944. brcmf_bus_stop(drvr->bus_if);
  945. }
  946. }
  947. void brcmf_dev_reset(struct device *dev)
  948. {
  949. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  950. struct brcmf_pub *drvr = bus_if->drvr;
  951. if (drvr == NULL)
  952. return;
  953. if (drvr->iflist[0])
  954. brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
  955. }
/* brcmf_detach() - full teardown of the driver instance for @dev.
 *
 * @dev: bus device whose drvdata holds the brcmf_bus.
 *
 * Ordering is significant: firmware events are stopped first so no new
 * interfaces appear, interfaces are removed highest-index-first so the
 * primary (index 0, which also detaches cfg80211) goes last, then the
 * bus, protocol, fws and debugfs layers are released before freeing
 * drvr itself.  Safe to call on a partially attached instance.
 */
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
		if (drvr->iflist[i]) {
			brcmf_fws_del_interface(drvr->iflist[i]);
			brcmf_del_if(drvr, i);
		}

	brcmf_bus_detach(drvr);

	if (drvr->prot)
		brcmf_proto_detach(drvr);

	brcmf_fws_deinit(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}
  980. static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
  981. {
  982. return atomic_read(&ifp->pend_8021x_cnt);
  983. }
  984. int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
  985. {
  986. struct brcmf_if *ifp = netdev_priv(ndev);
  987. int err;
  988. err = wait_event_timeout(ifp->pend_8021x_wait,
  989. !brcmf_get_pend_8021x_cnt(ifp),
  990. msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
  991. WARN_ON(!err);
  992. return !err;
  993. }
/*
 * return chip id and rev of the device encoded in u32.
 */
u32 brcmf_get_chip_info(struct brcmf_if *ifp)
{
	struct brcmf_bus *bus = ifp->drvr->bus_if;

	/* chip id in the upper bits, 4-bit chip revision in the low nibble */
	return bus->chip << 4 | bus->chiprev;
}
/* Work handler that registers the configured bus glue (SDIO and/or USB).
 * NOTE(review): registration is deferred to a work item rather than done
 * in module_init directly -- presumably to decouple bus probing from
 * module load; confirm against driver history.
 */
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
  1012. static int __init brcmfmac_module_init(void)
  1013. {
  1014. brcmf_debugfs_init();
  1015. #ifdef CONFIG_BRCMFMAC_SDIO
  1016. brcmf_sdio_init();
  1017. #endif
  1018. if (!schedule_work(&brcmf_driver_work))
  1019. return -EBUSY;
  1020. return 0;
  1021. }
/* Module exit: cancel any still-queued registration work, then tear the
 * bus glue and debugfs down in reverse order of module init.
 */
static void __exit brcmfmac_module_exit(void)
{
	/* ensure brcmf_driver_register() is not running/queued */
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
	brcmf_debugfs_exit();
}
/* Standard kernel module entry/exit hooks. */
module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);