bcmsdh_sdmmc.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/sched.h>	/* request_irq() */
#include <linux/module.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
#include "dhd.h"
#include "dhd_dbg.h"
#include "wl_cfg80211.h"

#define SDIO_VENDOR_ID_BROADCOM		0x02d0

#define DMA_ALIGN_MASK	0x03

#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
	{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
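
/*
 * PM helpers used before each SDIO access: brcmf_pm_resume_wait() waits
 * (up to roughly 300ms) while a system suspend is in progress, and
 * brcmf_pm_resume_error() tells the caller to bail out with an error if
 * the device is still suspended afterwards.
 */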
static bool
brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
{
	bool is_err = false;
#ifdef CONFIG_PM_SLEEP
	is_err = atomic_read(&sdiodev->suspend);
#endif
	return is_err;
}

static void
brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
{
#ifdef CONFIG_PM_SLEEP
	int retry = 0;
	while (atomic_read(&sdiodev->suspend) && retry++ != 30)
		wait_event_timeout(*wq, false, HZ/100);
#endif
}
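
/*
 * Byte writes to function 0 (CCCR) are restricted: the F2 enable/disable
 * bit and the I/O abort register are handled as special cases, vendor
 * registers at 0xF0 and above go through sdio_f0_writeb(), and everything
 * else is rejected with -EPERM.
 */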
static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
					    uint regaddr, u8 *byte)
{
	struct sdio_func *sdfunc = sdiodev->func[0];
	int err_ret = 0;

	/*
	 * Can only directly write to some F0 registers.
	 * Handle F2 enable/disable and Abort command
	 * as a special case.
	 */
	if (regaddr == SDIO_CCCR_IOEx) {
		sdfunc = sdiodev->func[2];
		if (sdfunc) {
			sdio_claim_host(sdfunc);
			if (*byte & SDIO_FUNC_ENABLE_2) {
				/* Enable Function 2 */
				err_ret = sdio_enable_func(sdfunc);
				if (err_ret)
					brcmf_dbg(ERROR,
						  "enable F2 failed:%d\n",
						  err_ret);
			} else {
				/* Disable Function 2 */
				err_ret = sdio_disable_func(sdfunc);
				if (err_ret)
					brcmf_dbg(ERROR,
						  "Disable F2 failed:%d\n",
						  err_ret);
			}
			sdio_release_host(sdfunc);
		}
	} else if (regaddr == SDIO_CCCR_ABORT) {
		sdio_claim_host(sdfunc);
		sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
		sdio_release_host(sdfunc);
	} else if (regaddr < 0xF0) {
		brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
		err_ret = -EPERM;
	} else {
		sdio_claim_host(sdfunc);
		sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
		sdio_release_host(sdfunc);
	}

	return err_ret;
}
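
/*
 * Single-byte (CMD52) register access. F0 writes are routed through
 * brcmf_sdioh_f0_write_byte() above; F0 reads use sdio_f0_readb() and
 * F1/F2 accesses use sdio_readb()/sdio_writeb() with the host claimed.
 */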
int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
			     uint regaddr, u8 *byte)
{
	int err_ret;

	brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	if (rw && func == 0) {
		/* handle F0 separately */
		err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
	} else {
		sdio_claim_host(sdiodev->func[func]);
		if (rw) /* CMD52 Write */
			sdio_writeb(sdiodev->func[func], *byte, regaddr,
				    &err_ret);
		else if (func == 0) {
			*byte = sdio_f0_readb(sdiodev->func[func], regaddr,
					      &err_ret);
		} else {
			*byte = sdio_readb(sdiodev->func[func], regaddr,
					   &err_ret);
		}
		sdio_release_host(sdiodev->func[func]);
	}

	if (err_ret)
		brcmf_dbg(ERROR, "Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
			  rw ? "write" : "read", func, regaddr, *byte, err_ret);

	return err_ret;
}
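
/*
 * 2- or 4-byte register access on F1/F2. Function 0 only allows
 * single-byte (CMD52) transfers, so it is rejected here.
 */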
int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
			     uint rw, uint func, uint addr, u32 *word,
			     uint nbytes)
{
	int err_ret = -EIO;

	if (func == 0) {
		brcmf_dbg(ERROR, "Only CMD52 allowed to F0\n");
		return -EINVAL;
	}

	brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		  rw, func, addr, nbytes);

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	/* Claim host controller */
	sdio_claim_host(sdiodev->func[func]);

	if (rw) {		/* CMD53 Write */
		if (nbytes == 4)
			sdio_writel(sdiodev->func[func], *word, addr,
				    &err_ret);
		else if (nbytes == 2)
			sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
				    addr, &err_ret);
		else
			brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
	} else {		/* CMD53 Read */
		if (nbytes == 4)
			*word = sdio_readl(sdiodev->func[func], addr, &err_ret);
		else if (nbytes == 2)
			*word = sdio_readw(sdiodev->func[func], addr,
					   &err_ret) & 0xFFFF;
		else
			brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
	}

	/* Release host controller */
	sdio_release_host(sdiodev->func[func]);

	if (err_ret)
		brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
			  rw ? "write" : "read", err_ret);

	return err_ret;
}

/* precondition: host controller is claimed */
static int
brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
			 uint func, uint addr, struct sk_buff *pkt, uint pktlen)
{
	int err_ret = 0;

	if ((write) && (!fifo)) {
		err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
					   ((u8 *) (pkt->data)), pktlen);
	} else if (write) {
		err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
					   ((u8 *) (pkt->data)), pktlen);
	} else if (fifo) {
		err_ret = sdio_readsb(sdiodev->func[func],
				      ((u8 *) (pkt->data)), addr, pktlen);
	} else {
		err_ret = sdio_memcpy_fromio(sdiodev->func[func],
					     ((u8 *) (pkt->data)),
					     addr, pktlen);
	}

	return err_ret;
}
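
/*
 * Transfer a single sk_buff; the requested length is rounded up to a
 * multiple of 4 bytes before being handed to brcmf_sdioh_request_data().
 */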
static int
brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
			   uint write, uint func, uint addr,
			   struct sk_buff *pkt)
{
	bool fifo = (fix_inc == SDIOH_DATA_FIX);
	int err_ret = 0;
	uint pkt_len = pkt->len;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	/* Claim host controller */
	sdio_claim_host(sdiodev->func[func]);

	pkt_len += 3;
	pkt_len &= 0xFFFFFFFC;

	err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
					   addr, pkt, pkt_len);
	if (err_ret) {
		brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
			  write ? "TX" : "RX", pkt, addr, pkt_len, err_ret);
	} else {
		brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
			  write ? "TX" : "RX", pkt, addr, pkt_len);
	}

	/* Release host controller */
	sdio_release_host(sdiodev->func[func]);

	brcmf_dbg(TRACE, "Exit\n");
	return err_ret;
}
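
/*
 * Transfer a queue of sk_buffs one packet at a time. For incrementing
 * (non-FIFO) transfers the target address advances by each rounded-up
 * packet length; for FIFO transfers it stays fixed.
 */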
int
brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
			  uint write, uint func, uint addr,
			  struct sk_buff_head *pktq)
{
	bool fifo = (fix_inc == SDIOH_DATA_FIX);
	u32 SGCount = 0;
	int err_ret = 0;
	struct sk_buff *pkt;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	/* Claim host controller */
	sdio_claim_host(sdiodev->func[func]);

	skb_queue_walk(pktq, pkt) {
		uint pkt_len = pkt->len;
		pkt_len += 3;
		pkt_len &= 0xFFFFFFFC;

		err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
						   addr, pkt, pkt_len);
		if (err_ret) {
			brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
				  write ? "TX" : "RX", pkt, SGCount, addr,
				  pkt_len, err_ret);
		} else {
			brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
				  write ? "TX" : "RX", pkt, SGCount, addr,
				  pkt_len);
		}

		if (!fifo)
			addr += pkt_len;
		SGCount++;
	}

	/* Release host controller */
	sdio_release_host(sdiodev->func[func]);

	brcmf_dbg(TRACE, "Exit\n");
	return err_ret;
}

/*
 * This function takes a buffer or packet, and fixes everything up
 * so that in the end, a DMA-able packet is created.
 *
 * A buffer does not have an associated packet pointer,
 * and may or may not be aligned.
 * A packet may consist of a single packet, or a packet chain.
 * If it is a packet chain, then all the packets in the chain
 * must be properly aligned.
 *
 * If the packet data is not aligned, then there may only be
 * one packet, and in this case, it is copied to a new
 * aligned packet.
 */
int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
			       uint fix_inc, uint write, uint func, uint addr,
			       uint buflen_u, u8 *buffer, struct sk_buff *pkt)
{
	int Status;
	struct sk_buff *mypkt = NULL;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	/* Case 1: we don't have a packet. */
	if (pkt == NULL) {
		brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
			  write ? "TX" : "RX", buflen_u);
		mypkt = brcmu_pkt_buf_get_skb(buflen_u);
		if (!mypkt) {
			brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
				  buflen_u);
			return -EIO;
		}

		/* For a write, copy the buffer data into the packet. */
		if (write)
			memcpy(mypkt->data, buffer, buflen_u);

		Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
						    func, addr, mypkt);

		/* For a read, copy the packet data back to the buffer. */
		if (!write)
			memcpy(buffer, mypkt->data, buflen_u);

		brcmu_pkt_buf_free_skb(mypkt);
	} else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
		/*
		 * Case 2: We have a packet, but it is unaligned.
		 * In this case, we cannot have a chain (pkt->next == NULL)
		 */
		brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
			  write ? "TX" : "RX", pkt->len);
		mypkt = brcmu_pkt_buf_get_skb(pkt->len);
		if (!mypkt) {
			brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
				  pkt->len);
			return -EIO;
		}

		/* For a write, copy the buffer data into the packet. */
		if (write)
			memcpy(mypkt->data, pkt->data, pkt->len);

		Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
						    func, addr, mypkt);

		/* For a read, copy the packet data back to the buffer. */
		if (!write)
			memcpy(pkt->data, mypkt->data, mypkt->len);

		brcmu_pkt_buf_free_skb(mypkt);
	} else {		/* Case 3: We have a packet and
				   it is aligned. */
		brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
			  write ? "Tx" : "Rx");
		Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
						    func, addr, pkt);
	}

	return Status;
}

/* Read client card reg */
static int
brcmf_sdioh_card_regread(struct brcmf_sdio_dev *sdiodev, int func, u32 regaddr,
			 int regsize, u32 *data)
{
	if ((func == 0) || (regsize == 1)) {
		u8 temp = 0;

		brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, func, regaddr,
					 &temp);
		*data = temp;
		*data &= 0xff;
		brcmf_dbg(DATA, "byte read data=0x%02x\n", *data);
	} else {
		brcmf_sdioh_request_word(sdiodev, SDIOH_READ, func, regaddr,
					 data, regsize);
		if (regsize == 2)
			*data &= 0xffff;
		brcmf_dbg(DATA, "word read data=0x%08x\n", *data);
	}

	return SUCCESS;
}

static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
{
	/* read 24 bits and return valid 17 bit addr */
	int i;
	u32 scratch, regdata;
	__le32 scratch_le;
	u8 *ptr = (u8 *)&scratch_le;

	for (i = 0; i < 3; i++) {
		if ((brcmf_sdioh_card_regread(sdiodev, 0, regaddr, 1,
				&regdata)) != SUCCESS)
			brcmf_dbg(ERROR, "Can't read!\n");

		*ptr++ = (u8) regdata;
		regaddr++;
	}

	/* Only the lower 17-bits are valid */
	scratch = le32_to_cpu(scratch_le);
	scratch &= 0x0001FFFF;
	return scratch;
}
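
/*
 * Cache the common and per-function CIS pointers and enable function 1.
 * Function 2 is enabled separately via brcmf_sdioh_f0_write_byte().
 */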
static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
{
	int err_ret;
	u32 fbraddr;
	u8 func;

	brcmf_dbg(TRACE, "\n");

	/* Get the Card's common CIS address */
	sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
							   SDIO_CCCR_CIS);
	brcmf_dbg(INFO, "Card's Common CIS Ptr = 0x%x\n",
		  sdiodev->func_cis_ptr[0]);

	/* Get the Card's function CIS (for each function) */
	for (fbraddr = SDIO_FBR_BASE(1), func = 1;
	     func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
		sdiodev->func_cis_ptr[func] =
		    brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
		brcmf_dbg(INFO, "Function %d CIS Ptr = 0x%x\n",
			  func, sdiodev->func_cis_ptr[func]);
	}

	/* Enable Function 1 */
	sdio_claim_host(sdiodev->func[1]);
	err_ret = sdio_enable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
	if (err_ret)
		brcmf_dbg(ERROR, "Failed to enable F1 Err: 0x%08x\n", err_ret);

	return false;
}

/*
 * Public entry points & extern's
 */
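/* Set the F1/F2 block sizes and enable the functions during bus setup. */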
int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
{
	int err_ret = 0;

	brcmf_dbg(TRACE, "\n");

	sdiodev->num_funcs = 2;

	sdio_claim_host(sdiodev->func[1]);
	err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
	sdio_release_host(sdiodev->func[1]);
	if (err_ret) {
		brcmf_dbg(ERROR, "Failed to set F1 blocksize\n");
		goto out;
	}

	sdio_claim_host(sdiodev->func[2]);
	err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
	sdio_release_host(sdiodev->func[2]);
	if (err_ret) {
		brcmf_dbg(ERROR, "Failed to set F2 blocksize\n");
		goto out;
	}

	brcmf_sdioh_enablefuncs(sdiodev);

out:
	brcmf_dbg(TRACE, "Done\n");
	return err_ret;
}

void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(TRACE, "\n");

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func[2]);
	sdio_disable_func(sdiodev->func[2]);
	sdio_release_host(sdiodev->func[2]);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func[1]);
	sdio_disable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
}
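
/*
 * The MMC core probes each SDIO function separately. The F1 probe
 * allocates the shared brcmf_sdio_dev state and stores it as drvdata of
 * the card device; the F2 probe then completes setup by calling
 * brcmf_sdio_probe().
 */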
static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int ret = 0;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(TRACE, "Enter\n");
	brcmf_dbg(TRACE, "func->class=%x\n", func->class);
	brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
	brcmf_dbg(TRACE, "sdio_device: 0x%04x\n", func->device);
	brcmf_dbg(TRACE, "Function#: 0x%04x\n", func->num);

	if (func->num == 1) {
		if (dev_get_drvdata(&func->card->dev)) {
			brcmf_dbg(ERROR, "card private drvdata occupied\n");
			return -ENXIO;
		}
		sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
		if (!sdiodev)
			return -ENOMEM;
		sdiodev->func[0] = func->card->sdio_func[0];
		sdiodev->func[1] = func;
		dev_set_drvdata(&func->card->dev, sdiodev);

		atomic_set(&sdiodev->suspend, false);
		init_waitqueue_head(&sdiodev->request_byte_wait);
		init_waitqueue_head(&sdiodev->request_word_wait);
		init_waitqueue_head(&sdiodev->request_packet_wait);
		init_waitqueue_head(&sdiodev->request_buffer_wait);
	}

	if (func->num == 2) {
		sdiodev = dev_get_drvdata(&func->card->dev);
		if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
			return -ENODEV;
		sdiodev->func[2] = func;

		brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
		ret = brcmf_sdio_probe(sdiodev);
	}

	return ret;
}

static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(TRACE, "Enter\n");
	brcmf_dbg(INFO, "func->class=%x\n", func->class);
	brcmf_dbg(INFO, "sdio_vendor: 0x%04x\n", func->vendor);
	brcmf_dbg(INFO, "sdio_device: 0x%04x\n", func->device);
	brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);

	if (func->num == 2) {
		sdiodev = dev_get_drvdata(&func->card->dev);
		brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
		brcmf_sdio_remove(sdiodev);
		dev_set_drvdata(&func->card->dev, NULL);
		kfree(sdiodev);
	}
}

#ifdef CONFIG_PM_SLEEP
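/*
 * Suspend is refused with -EINVAL unless the host can keep the card
 * powered (MMC_PM_KEEP_POWER). The suspend flag set here makes the
 * register-access paths above back off until resume clears it.
 */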
static int brcmf_sdio_suspend(struct device *dev)
{
	mmc_pm_flag_t sdio_flags;
	struct brcmf_sdio_dev *sdiodev;
	struct sdio_func *func = dev_to_sdio_func(dev);
	int ret = 0;

	brcmf_dbg(TRACE, "\n");

	sdiodev = dev_get_drvdata(&func->card->dev);

	atomic_set(&sdiodev->suspend, true);

	sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
		brcmf_dbg(ERROR, "Host can't keep power while suspended\n");
		return -EINVAL;
	}

	ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
	if (ret) {
		brcmf_dbg(ERROR, "Failed to set pm_flags\n");
		return ret;
	}

	brcmf_sdio_wdtmr_enable(sdiodev, false);

	return ret;
}

static int brcmf_sdio_resume(struct device *dev)
{
	struct brcmf_sdio_dev *sdiodev;
	struct sdio_func *func = dev_to_sdio_func(dev);

	sdiodev = dev_get_drvdata(&func->card->dev);
	brcmf_sdio_wdtmr_enable(sdiodev, true);
	atomic_set(&sdiodev->suspend, false);
	return 0;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_sdio_suspend,
	.resume		= brcmf_sdio_resume,
};
#endif	/* CONFIG_PM_SLEEP */

static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = "brcmfmac",
	.id_table = brcmf_sdmmc_ids,
#ifdef CONFIG_PM_SLEEP
	.drv = {
		.pm = &brcmf_sdio_pm_ops,
	},
#endif	/* CONFIG_PM_SLEEP */
};

/* bus register interface */
int brcmf_bus_register(void)
{
	brcmf_dbg(TRACE, "Enter\n");

	return sdio_register_driver(&brcmf_sdmmc_driver);
}

void brcmf_bus_unregister(void)
{
	brcmf_dbg(TRACE, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}