/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/brcmfmac-sdio.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2
  39. static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
  40. {
  41. struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
  42. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  43. brcmf_dbg(INTR, "OOB intr triggered\n");
  44. /* out-of-band interrupt is level-triggered which won't
  45. * be cleared until dpc
  46. */
  47. if (sdiodev->irq_en) {
  48. disable_irq_nosync(irq);
  49. sdiodev->irq_en = false;
  50. }
  51. brcmf_sdbrcm_isr(sdiodev->bus);
  52. return IRQ_HANDLED;
  53. }
  54. static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
  55. {
  56. struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
  57. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  58. brcmf_dbg(INTR, "IB intr triggered\n");
  59. brcmf_sdbrcm_isr(sdiodev->bus);
  60. }
/* dummy handler for SDIO function 2 interrupt
 *
 * Claiming the func 2 irq is required by the MMC core so that interrupts
 * raised on function 2 are acknowledged; all real work happens in the
 * function 1 handler above.
 */
static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
{
}
  65. int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
  66. {
  67. int ret = 0;
  68. u8 data;
  69. unsigned long flags;
  70. if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
  71. brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
  72. sdiodev->pdata->oob_irq_nr);
  73. ret = request_irq(sdiodev->pdata->oob_irq_nr,
  74. brcmf_sdio_oob_irqhandler,
  75. sdiodev->pdata->oob_irq_flags,
  76. "brcmf_oob_intr",
  77. &sdiodev->func[1]->dev);
  78. if (ret != 0) {
  79. brcmf_err("request_irq failed %d\n", ret);
  80. return ret;
  81. }
  82. sdiodev->oob_irq_requested = true;
  83. spin_lock_init(&sdiodev->irq_en_lock);
  84. spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
  85. sdiodev->irq_en = true;
  86. spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
  87. ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
  88. if (ret != 0) {
  89. brcmf_err("enable_irq_wake failed %d\n", ret);
  90. return ret;
  91. }
  92. sdiodev->irq_wake = true;
  93. sdio_claim_host(sdiodev->func[1]);
  94. /* must configure SDIO_CCCR_IENx to enable irq */
  95. data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
  96. data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
  97. brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
  98. /* redirect, configure and enable io for interrupt signal */
  99. data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
  100. if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
  101. data |= SDIO_SEPINT_ACT_HI;
  102. brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
  103. sdio_release_host(sdiodev->func[1]);
  104. } else {
  105. brcmf_dbg(SDIO, "Entering\n");
  106. sdio_claim_host(sdiodev->func[1]);
  107. sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
  108. sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
  109. sdio_release_host(sdiodev->func[1]);
  110. }
  111. return 0;
  112. }
  113. int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
  114. {
  115. brcmf_dbg(SDIO, "Entering\n");
  116. if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
  117. sdio_claim_host(sdiodev->func[1]);
  118. brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
  119. brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
  120. sdio_release_host(sdiodev->func[1]);
  121. if (sdiodev->oob_irq_requested) {
  122. sdiodev->oob_irq_requested = false;
  123. if (sdiodev->irq_wake) {
  124. disable_irq_wake(sdiodev->pdata->oob_irq_nr);
  125. sdiodev->irq_wake = false;
  126. }
  127. free_irq(sdiodev->pdata->oob_irq_nr,
  128. &sdiodev->func[1]->dev);
  129. sdiodev->irq_en = false;
  130. }
  131. } else {
  132. sdio_claim_host(sdiodev->func[1]);
  133. sdio_release_irq(sdiodev->func[2]);
  134. sdio_release_irq(sdiodev->func[1]);
  135. sdio_release_host(sdiodev->func[1]);
  136. }
  137. return 0;
  138. }
  139. static int
  140. brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
  141. {
  142. int err = 0, i;
  143. u8 addr[3];
  144. s32 retry;
  145. addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
  146. addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
  147. addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
  148. for (i = 0; i < 3; i++) {
  149. retry = 0;
  150. do {
  151. if (retry)
  152. usleep_range(1000, 2000);
  153. err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
  154. SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
  155. &addr[i]);
  156. } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
  157. if (err) {
  158. brcmf_err("failed at addr:0x%0x\n",
  159. SBSDIO_FUNC1_SBADDRLOW + i);
  160. break;
  161. }
  162. }
  163. return err;
  164. }
  165. static int
  166. brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
  167. {
  168. uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
  169. int err = 0;
  170. if (bar0 != sdiodev->sbwad) {
  171. err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
  172. if (err)
  173. return err;
  174. sdiodev->sbwad = bar0;
  175. }
  176. *addr &= SBSDIO_SB_OFT_ADDR_MASK;
  177. if (width == 4)
  178. *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
  179. return 0;
  180. }
  181. int
  182. brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
  183. void *data, bool write)
  184. {
  185. u8 func_num, reg_size;
  186. s32 retry = 0;
  187. int ret;
  188. /*
  189. * figure out how to read the register based on address range
  190. * 0x00 ~ 0x7FF: function 0 CCCR and FBR
  191. * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
  192. * The rest: function 1 silicon backplane core registers
  193. */
  194. if ((addr & ~REG_F0_REG_MASK) == 0) {
  195. func_num = SDIO_FUNC_0;
  196. reg_size = 1;
  197. } else if ((addr & ~REG_F1_MISC_MASK) == 0) {
  198. func_num = SDIO_FUNC_1;
  199. reg_size = 1;
  200. } else {
  201. func_num = SDIO_FUNC_1;
  202. reg_size = 4;
  203. brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
  204. }
  205. do {
  206. if (!write)
  207. memset(data, 0, reg_size);
  208. if (retry) /* wait for 1 ms till bus get settled down */
  209. usleep_range(1000, 2000);
  210. if (reg_size == 1)
  211. ret = brcmf_sdioh_request_byte(sdiodev, write,
  212. func_num, addr, data);
  213. else
  214. ret = brcmf_sdioh_request_word(sdiodev, write,
  215. func_num, addr, data, 4);
  216. } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
  217. if (ret != 0)
  218. brcmf_err("failed with %d\n", ret);
  219. return ret;
  220. }
  221. u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  222. {
  223. u8 data;
  224. int retval;
  225. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  226. retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
  227. brcmf_dbg(SDIO, "data:0x%02x\n", data);
  228. if (ret)
  229. *ret = retval;
  230. return data;
  231. }
  232. u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  233. {
  234. u32 data;
  235. int retval;
  236. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  237. retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
  238. brcmf_dbg(SDIO, "data:0x%08x\n", data);
  239. if (ret)
  240. *ret = retval;
  241. return data;
  242. }
  243. void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
  244. u8 data, int *ret)
  245. {
  246. int retval;
  247. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
  248. retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
  249. if (ret)
  250. *ret = retval;
  251. }
  252. void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
  253. u32 data, int *ret)
  254. {
  255. int retval;
  256. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
  257. retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
  258. if (ret)
  259. *ret = retval;
  260. }
  261. /**
  262. * brcmf_sdio_buffrw - SDIO interface function for block data access
  263. * @sdiodev: brcmfmac sdio device
  264. * @fn: SDIO function number
  265. * @write: direction flag
  266. * @addr: dongle memory address as source/destination
  267. * @pkt: skb pointer
  268. *
  269. * This function takes the respbonsibility as the interface function to MMC
  270. * stack for block data access. It assumes that the skb passed down by the
  271. * caller has already been padded and aligned.
  272. */
  273. static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
  274. bool write, u32 addr, struct sk_buff_head *pktlist)
  275. {
  276. unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
  277. unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
  278. unsigned short max_seg_sz, seg_sz;
  279. unsigned char *pkt_data, *orig_data, *dst_data;
  280. struct sk_buff *pkt_next = NULL, *local_pkt_next;
  281. struct sk_buff_head local_list, *target_list;
  282. struct mmc_request mmc_req;
  283. struct mmc_command mmc_cmd;
  284. struct mmc_data mmc_dat;
  285. struct sg_table st;
  286. struct scatterlist *sgl;
  287. struct mmc_host *host;
  288. int ret = 0;
  289. if (!pktlist->qlen)
  290. return -EINVAL;
  291. brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
  292. if (brcmf_pm_resume_error(sdiodev))
  293. return -EIO;
  294. /* Single skb use the standard mmc interface */
  295. if (pktlist->qlen == 1) {
  296. pkt_next = pktlist->next;
  297. req_sz = pkt_next->len + 3;
  298. req_sz &= (uint)~3;
  299. if (write)
  300. return sdio_memcpy_toio(sdiodev->func[fn], addr,
  301. ((u8 *)(pkt_next->data)),
  302. req_sz);
  303. else if (fn == 1)
  304. return sdio_memcpy_fromio(sdiodev->func[fn],
  305. ((u8 *)(pkt_next->data)),
  306. addr, req_sz);
  307. else
  308. /* function 2 read is FIFO operation */
  309. return sdio_readsb(sdiodev->func[fn],
  310. ((u8 *)(pkt_next->data)), addr,
  311. req_sz);
  312. }
  313. target_list = pktlist;
  314. /* for host with broken sg support, prepare a page aligned list */
  315. __skb_queue_head_init(&local_list);
  316. if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
  317. req_sz = 0;
  318. skb_queue_walk(pktlist, pkt_next)
  319. req_sz += pkt_next->len;
  320. req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
  321. while (req_sz > PAGE_SIZE) {
  322. pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
  323. if (pkt_next == NULL) {
  324. ret = -ENOMEM;
  325. goto exit;
  326. }
  327. __skb_queue_tail(&local_list, pkt_next);
  328. req_sz -= PAGE_SIZE;
  329. }
  330. pkt_next = brcmu_pkt_buf_get_skb(req_sz);
  331. if (pkt_next == NULL) {
  332. ret = -ENOMEM;
  333. goto exit;
  334. }
  335. __skb_queue_tail(&local_list, pkt_next);
  336. target_list = &local_list;
  337. }
  338. host = sdiodev->func[fn]->card->host;
  339. func_blk_sz = sdiodev->func[fn]->cur_blksize;
  340. /* Blocks per command is limited by host count, host transfer
  341. * size and the maximum for IO_RW_EXTENDED of 511 blocks.
  342. */
  343. max_blks = min_t(unsigned int, host->max_blk_count, 511u);
  344. max_req_sz = min_t(unsigned int, host->max_req_size,
  345. max_blks * func_blk_sz);
  346. max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
  347. max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
  348. seg_sz = target_list->qlen;
  349. pkt_offset = 0;
  350. pkt_next = target_list->next;
  351. if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
  352. ret = -ENOMEM;
  353. goto exit;
  354. }
  355. while (seg_sz) {
  356. req_sz = 0;
  357. sg_cnt = 0;
  358. memset(&mmc_req, 0, sizeof(struct mmc_request));
  359. memset(&mmc_cmd, 0, sizeof(struct mmc_command));
  360. memset(&mmc_dat, 0, sizeof(struct mmc_data));
  361. sgl = st.sgl;
  362. /* prep sg table */
  363. while (pkt_next != (struct sk_buff *)target_list) {
  364. pkt_data = pkt_next->data + pkt_offset;
  365. sg_data_sz = pkt_next->len - pkt_offset;
  366. if (sg_data_sz > host->max_seg_size)
  367. sg_data_sz = host->max_seg_size;
  368. if (sg_data_sz > max_req_sz - req_sz)
  369. sg_data_sz = max_req_sz - req_sz;
  370. sg_set_buf(sgl, pkt_data, sg_data_sz);
  371. sg_cnt++;
  372. sgl = sg_next(sgl);
  373. req_sz += sg_data_sz;
  374. pkt_offset += sg_data_sz;
  375. if (pkt_offset == pkt_next->len) {
  376. pkt_offset = 0;
  377. pkt_next = pkt_next->next;
  378. }
  379. if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
  380. break;
  381. }
  382. seg_sz -= sg_cnt;
  383. if (req_sz % func_blk_sz != 0) {
  384. brcmf_err("sg request length %u is not %u aligned\n",
  385. req_sz, func_blk_sz);
  386. ret = -ENOTBLK;
  387. goto exit;
  388. }
  389. mmc_dat.sg = st.sgl;
  390. mmc_dat.sg_len = sg_cnt;
  391. mmc_dat.blksz = func_blk_sz;
  392. mmc_dat.blocks = req_sz / func_blk_sz;
  393. mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
  394. mmc_cmd.opcode = SD_IO_RW_EXTENDED;
  395. mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
  396. mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
  397. mmc_cmd.arg |= 1<<27; /* block mode */
  398. /* incrementing addr for function 1 */
  399. mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
  400. mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
  401. mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
  402. mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
  403. mmc_req.cmd = &mmc_cmd;
  404. mmc_req.data = &mmc_dat;
  405. if (fn == 1)
  406. addr += req_sz;
  407. mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
  408. mmc_wait_for_req(host, &mmc_req);
  409. ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
  410. if (ret != 0) {
  411. brcmf_err("CMD53 sg block %s failed %d\n",
  412. write ? "write" : "read", ret);
  413. ret = -EIO;
  414. break;
  415. }
  416. }
  417. if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
  418. local_pkt_next = local_list.next;
  419. orig_offset = 0;
  420. skb_queue_walk(pktlist, pkt_next) {
  421. dst_offset = 0;
  422. do {
  423. req_sz = local_pkt_next->len - orig_offset;
  424. req_sz = min_t(uint, pkt_next->len - dst_offset,
  425. req_sz);
  426. orig_data = local_pkt_next->data + orig_offset;
  427. dst_data = pkt_next->data + dst_offset;
  428. memcpy(dst_data, orig_data, req_sz);
  429. orig_offset += req_sz;
  430. dst_offset += req_sz;
  431. if (orig_offset == local_pkt_next->len) {
  432. orig_offset = 0;
  433. local_pkt_next = local_pkt_next->next;
  434. }
  435. if (dst_offset == pkt_next->len)
  436. break;
  437. } while (!skb_queue_empty(&local_list));
  438. }
  439. }
  440. exit:
  441. sg_free_table(&st);
  442. while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
  443. brcmu_pkt_buf_free_skb(pkt_next);
  444. return ret;
  445. }
  446. int
  447. brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  448. uint flags, u8 *buf, uint nbytes)
  449. {
  450. struct sk_buff *mypkt;
  451. int err;
  452. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  453. if (!mypkt) {
  454. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  455. nbytes);
  456. return -EIO;
  457. }
  458. err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
  459. if (!err)
  460. memcpy(buf, mypkt->data, nbytes);
  461. brcmu_pkt_buf_free_skb(mypkt);
  462. return err;
  463. }
  464. int
  465. brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  466. uint flags, struct sk_buff *pkt)
  467. {
  468. uint width;
  469. int err = 0;
  470. struct sk_buff_head pkt_list;
  471. brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
  472. fn, addr, pkt->len);
  473. width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
  474. err = brcmf_sdio_addrprep(sdiodev, width, &addr);
  475. if (err)
  476. goto done;
  477. skb_queue_head_init(&pkt_list);
  478. skb_queue_tail(&pkt_list, pkt);
  479. err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
  480. skb_dequeue_tail(&pkt_list);
  481. done:
  482. return err;
  483. }
  484. int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  485. uint flags, struct sk_buff_head *pktq)
  486. {
  487. uint incr_fix;
  488. uint width;
  489. int err = 0;
  490. brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
  491. fn, addr, pktq->qlen);
  492. width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
  493. err = brcmf_sdio_addrprep(sdiodev, width, &addr);
  494. if (err)
  495. goto done;
  496. incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
  497. err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
  498. done:
  499. return err;
  500. }
  501. int
  502. brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  503. uint flags, u8 *buf, uint nbytes)
  504. {
  505. struct sk_buff *mypkt;
  506. struct sk_buff_head pktq;
  507. int err;
  508. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  509. if (!mypkt) {
  510. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  511. nbytes);
  512. return -EIO;
  513. }
  514. memcpy(mypkt->data, buf, nbytes);
  515. __skb_queue_head_init(&pktq);
  516. __skb_queue_tail(&pktq, mypkt);
  517. err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
  518. __skb_dequeue_tail(&pktq);
  519. brcmu_pkt_buf_free_skb(mypkt);
  520. return err;
  521. }
  522. int
  523. brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  524. uint flags, struct sk_buff_head *pktq)
  525. {
  526. uint width;
  527. int err = 0;
  528. brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
  529. fn, addr, pktq->qlen);
  530. width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
  531. brcmf_sdio_addrprep(sdiodev, width, &addr);
  532. err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
  533. return err;
  534. }
  535. int
  536. brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
  537. u8 *data, uint size)
  538. {
  539. int bcmerror = 0;
  540. struct sk_buff *pkt;
  541. u32 sdaddr;
  542. uint dsize;
  543. struct sk_buff_head pkt_list;
  544. dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
  545. pkt = dev_alloc_skb(dsize);
  546. if (!pkt) {
  547. brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
  548. return -EIO;
  549. }
  550. pkt->priority = 0;
  551. skb_queue_head_init(&pkt_list);
  552. /* Determine initial transfer parameters */
  553. sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
  554. if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
  555. dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
  556. else
  557. dsize = size;
  558. sdio_claim_host(sdiodev->func[1]);
  559. /* Do the transfer(s) */
  560. while (size) {
  561. /* Set the backplane window to include the start address */
  562. bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
  563. if (bcmerror)
  564. break;
  565. brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
  566. write ? "write" : "read", dsize,
  567. sdaddr, address & SBSDIO_SBWINDOW_MASK);
  568. sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
  569. sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
  570. skb_put(pkt, dsize);
  571. if (write)
  572. memcpy(pkt->data, data, dsize);
  573. skb_queue_tail(&pkt_list, pkt);
  574. bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
  575. sdaddr, &pkt_list);
  576. skb_dequeue_tail(&pkt_list);
  577. if (bcmerror) {
  578. brcmf_err("membytes transfer failed\n");
  579. break;
  580. }
  581. if (!write)
  582. memcpy(data, pkt->data, dsize);
  583. skb_trim(pkt, dsize);
  584. /* Adjust for next transfer (if any) */
  585. size -= dsize;
  586. if (size) {
  587. data += dsize;
  588. address += dsize;
  589. sdaddr = 0;
  590. dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
  591. }
  592. }
  593. dev_kfree_skb(pkt);
  594. /* Return the window to backplane enumeration space for core access */
  595. if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
  596. brcmf_err("FAILED to set window back to 0x%x\n",
  597. sdiodev->sbwad);
  598. sdio_release_host(sdiodev->func[1]);
  599. return bcmerror;
  600. }
  601. int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
  602. {
  603. char t_func = (char)fn;
  604. brcmf_dbg(SDIO, "Enter\n");
  605. /* issue abort cmd52 command through F0 */
  606. brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
  607. SDIO_CCCR_ABORT, &t_func);
  608. brcmf_dbg(SDIO, "Exit\n");
  609. return 0;
  610. }
  611. int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
  612. {
  613. u32 regs = 0;
  614. int ret = 0;
  615. ret = brcmf_sdioh_attach(sdiodev);
  616. if (ret)
  617. goto out;
  618. regs = SI_ENUM_BASE;
  619. /* try to attach to the target device */
  620. sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
  621. if (!sdiodev->bus) {
  622. brcmf_err("device attach failed\n");
  623. ret = -ENODEV;
  624. goto out;
  625. }
  626. out:
  627. if (ret)
  628. brcmf_sdio_remove(sdiodev);
  629. return ret;
  630. }
  631. EXPORT_SYMBOL(brcmf_sdio_probe);
  632. int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
  633. {
  634. sdiodev->bus_if->state = BRCMF_BUS_DOWN;
  635. if (sdiodev->bus) {
  636. brcmf_sdbrcm_disconnect(sdiodev->bus);
  637. sdiodev->bus = NULL;
  638. }
  639. brcmf_sdioh_detach(sdiodev);
  640. sdiodev->sbwad = 0;
  641. return 0;
  642. }
  643. EXPORT_SYMBOL(brcmf_sdio_remove);
  644. void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
  645. {
  646. if (enable)
  647. brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
  648. else
  649. brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
  650. }