bcmsdh.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/platform_data/brcmfmac-sdio.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT    2

static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

        brcmf_dbg(INTR, "OOB intr triggered\n");

        /* out-of-band interrupt is level-triggered which won't
         * be cleared until dpc
         */
        if (sdiodev->irq_en) {
                disable_irq_nosync(irq);
                sdiodev->irq_en = false;
        }

        brcmf_sdbrcm_isr(sdiodev->bus);

        return IRQ_HANDLED;
}

static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

        brcmf_dbg(INTR, "IB intr triggered\n");

        brcmf_sdbrcm_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
{
}

int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
{
        int ret = 0;
        u8 data;
        unsigned long flags;

        if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
                brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
                          sdiodev->pdata->oob_irq_nr);
                ret = request_irq(sdiodev->pdata->oob_irq_nr,
                                  brcmf_sdio_oob_irqhandler,
                                  sdiodev->pdata->oob_irq_flags,
                                  "brcmf_oob_intr",
                                  &sdiodev->func[1]->dev);
                if (ret != 0) {
                        brcmf_err("request_irq failed %d\n", ret);
                        return ret;
                }
                sdiodev->oob_irq_requested = true;
                spin_lock_init(&sdiodev->irq_en_lock);
                spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
                sdiodev->irq_en = true;
                spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

                ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
                if (ret != 0) {
                        brcmf_err("enable_irq_wake failed %d\n", ret);
                        return ret;
                }
                sdiodev->irq_wake = true;

                sdio_claim_host(sdiodev->func[1]);

                /* must configure SDIO_CCCR_IENx to enable irq */
                data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
                data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
                brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

                /* redirect, configure and enable io for interrupt signal */
                data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
                if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
                        data |= SDIO_SEPINT_ACT_HI;
                brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);

                sdio_release_host(sdiodev->func[1]);
        } else {
                brcmf_dbg(SDIO, "Entering\n");
                sdio_claim_host(sdiodev->func[1]);
                sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
                sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
                sdio_release_host(sdiodev->func[1]);
        }

        return 0;
}
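
/*
 * Illustrative note (a sketch, assuming the standard SDIO CCCR layout): the
 * SDIO_CCCR_IENx write above sets bit 0, the card-wide master interrupt
 * enable, plus the per-function enable bits, so
 *
 *      data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
 *
 * enables interrupts for function 1, function 2 and the master enable in a
 * single register write.
 */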

int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
        brcmf_dbg(SDIO, "Entering\n");

        if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
                sdio_claim_host(sdiodev->func[1]);
                brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
                brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
                sdio_release_host(sdiodev->func[1]);

                if (sdiodev->oob_irq_requested) {
                        sdiodev->oob_irq_requested = false;
                        if (sdiodev->irq_wake) {
                                disable_irq_wake(sdiodev->pdata->oob_irq_nr);
                                sdiodev->irq_wake = false;
                        }
                        free_irq(sdiodev->pdata->oob_irq_nr,
                                 &sdiodev->func[1]->dev);
                        sdiodev->irq_en = false;
                }
        } else {
                sdio_claim_host(sdiodev->func[1]);
                sdio_release_irq(sdiodev->func[2]);
                sdio_release_irq(sdiodev->func[1]);
                sdio_release_host(sdiodev->func[1]);
        }

        return 0;
}

static int
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
        int err = 0, i;
        u8 addr[3];
        s32 retry;

        addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
        addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
        addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

        for (i = 0; i < 3; i++) {
                retry = 0;
                do {
                        if (retry)
                                usleep_range(1000, 2000);
                        err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
                                        SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
                                        &addr[i]);
                } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

                if (err) {
                        brcmf_err("failed at addr:0x%0x\n",
                                  SBSDIO_FUNC1_SBADDRLOW + i);
                        break;
                }
        }

        return err;
}
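
/*
 * Illustrative note (a sketch, assuming the 32 KB window implied by
 * SBSDIO_SB_OFT_ADDR_LIMIT): the three bytes written above program the
 * function 1 backplane window registers. For a window base of, say,
 * 0x18008000 the split is
 *
 *      addr[0] = (0x18008000 >>  8) & SBSDIO_SBADDRLOW_MASK
 *      addr[1] = (0x18008000 >> 16) & SBSDIO_SBADDRMID_MASK
 *      addr[2] = (0x18008000 >> 24) & SBSDIO_SBADDRHIGH_MASK
 *
 * written to SBSDIO_FUNC1_SBADDRLOW, MID and HIGH in that order, each with
 * up to SDIOH_API_ACCESS_RETRY_LIMIT retries.
 */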

static int
brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
        uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
        int err = 0;

        if (bar0 != sdiodev->sbwad) {
                err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
                if (err)
                        return err;

                sdiodev->sbwad = bar0;
        }

        *addr &= SBSDIO_SB_OFT_ADDR_MASK;

        if (width == 4)
                *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

        return 0;
}
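
/*
 * Illustrative note: brcmf_sdio_addrprep() rewrites *addr in place so callers
 * can issue the access directly on function 1. Only the low
 * SBSDIO_SB_OFT_ADDR_MASK bits remain as the in-window offset, the window is
 * reprogrammed only when the base differs from the cached sdiodev->sbwad, and
 * for 4-byte accesses SBSDIO_SB_ACCESS_2_4B_FLAG is OR'd in to request a
 * 32-bit backplane access.
 */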

int
brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                        void *data, bool write)
{
        u8 func_num, reg_size;
        s32 retry = 0;
        int ret;

        /*
         * figure out how to read the register based on address range
         * 0x00 ~ 0x7FF: function 0 CCCR and FBR
         * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
         * The rest: function 1 silicon backplane core registers
         */
        if ((addr & ~REG_F0_REG_MASK) == 0) {
                func_num = SDIO_FUNC_0;
                reg_size = 1;
        } else if ((addr & ~REG_F1_MISC_MASK) == 0) {
                func_num = SDIO_FUNC_1;
                reg_size = 1;
        } else {
                func_num = SDIO_FUNC_1;
                reg_size = 4;

                ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
                if (ret)
                        goto done;
        }

        do {
                if (!write)
                        memset(data, 0, reg_size);
                if (retry)      /* wait 1 ms for the bus to settle down */
                        usleep_range(1000, 2000);
                if (reg_size == 1)
                        ret = brcmf_sdioh_request_byte(sdiodev, write,
                                                       func_num, addr, data);
                else
                        ret = brcmf_sdioh_request_word(sdiodev, write,
                                                       func_num, addr, data, 4);
        } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

done:
        if (ret != 0)
                brcmf_err("failed with %d\n", ret);

        return ret;
}
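
/*
 * Illustrative examples of the decode above (register values taken from the
 * CCCR/SBSDIO definitions used by this driver; shown only as a sketch):
 *
 *      SDIO_CCCR_IENx (0x04)             -> function 0, 1-byte access
 *      SBSDIO_FUNC1_CHIPCLKCSR (0x1000e) -> function 1, 1-byte access
 *      SI_ENUM_BASE + core offset        -> function 1, 4-byte access via
 *                                           the backplane window
 */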

u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
        u8 data;
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
        brcmf_dbg(SDIO, "data:0x%02x\n", data);

        if (ret)
                *ret = retval;

        return data;
}

u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
        u32 data;
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
        brcmf_dbg(SDIO, "data:0x%08x\n", data);

        if (ret)
                *ret = retval;

        return data;
}

void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
                      u8 data, int *ret)
{
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);

        if (ret)
                *ret = retval;
}

void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
                      u32 data, int *ret)
{
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);

        if (ret)
                *ret = retval;
}

static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
                             bool write, u32 addr, struct sk_buff *pkt)
{
        unsigned int req_sz;

        brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;

        /* Single skb use the standard mmc interface */
        req_sz = pkt->len + 3;
        req_sz &= (uint)~3;

        if (write)
                return sdio_memcpy_toio(sdiodev->func[fn], addr,
                                        ((u8 *)(pkt->data)),
                                        req_sz);
        else if (fn == 1)
                return sdio_memcpy_fromio(sdiodev->func[fn],
                                          ((u8 *)(pkt->data)),
                                          addr, req_sz);
        else
                /* function 2 read is FIFO operation */
                return sdio_readsb(sdiodev->func[fn],
                                   ((u8 *)(pkt->data)), addr,
                                   req_sz);
}
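
/*
 * Note: the "(pkt->len + 3) & ~3" above rounds the request up to a whole
 * number of 32-bit words, e.g. a 1502-byte skb is transferred as a 1504-byte
 * request.
 */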

/**
 * brcmf_sdio_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer list
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
                                bool write, u32 addr,
                                struct sk_buff_head *pktlist)
{
        unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
        unsigned int max_req_sz, orig_offset, dst_offset;
        unsigned short max_seg_cnt, seg_sz;
        unsigned char *pkt_data, *orig_data, *dst_data;
        struct sk_buff *pkt_next = NULL, *local_pkt_next;
        struct sk_buff_head local_list, *target_list;
        struct mmc_request mmc_req;
        struct mmc_command mmc_cmd;
        struct mmc_data mmc_dat;
        struct sg_table st;
        struct scatterlist *sgl;
        int ret = 0;

        if (!pktlist->qlen)
                return -EINVAL;

        brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;

        target_list = pktlist;
        /* for host with broken sg support, prepare a page aligned list */
        __skb_queue_head_init(&local_list);
        if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
                req_sz = 0;
                skb_queue_walk(pktlist, pkt_next)
                        req_sz += pkt_next->len;
                req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
                while (req_sz > PAGE_SIZE) {
                        pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
                        if (pkt_next == NULL) {
                                ret = -ENOMEM;
                                goto exit;
                        }
                        __skb_queue_tail(&local_list, pkt_next);
                        req_sz -= PAGE_SIZE;
                }
                pkt_next = brcmu_pkt_buf_get_skb(req_sz);
                if (pkt_next == NULL) {
                        ret = -ENOMEM;
                        goto exit;
                }
                __skb_queue_tail(&local_list, pkt_next);
                target_list = &local_list;
        }

        func_blk_sz = sdiodev->func[fn]->cur_blksize;
        max_req_sz = sdiodev->max_request_size;
        max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
                            target_list->qlen);
        seg_sz = target_list->qlen;
        pkt_offset = 0;
        pkt_next = target_list->next;

        if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto exit;
        }

        memset(&mmc_req, 0, sizeof(struct mmc_request));
        memset(&mmc_cmd, 0, sizeof(struct mmc_command));
        memset(&mmc_dat, 0, sizeof(struct mmc_data));

        mmc_dat.sg = st.sgl;
        mmc_dat.blksz = func_blk_sz;
        mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
        mmc_cmd.opcode = SD_IO_RW_EXTENDED;
        mmc_cmd.arg = write ? 1<<31 : 0;        /* write flag */
        mmc_cmd.arg |= (fn & 0x7) << 28;        /* SDIO func num */
        mmc_cmd.arg |= 1<<27;                   /* block mode */
        /* for function 1 the addr will be incremented */
        mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
        mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
        mmc_req.cmd = &mmc_cmd;
        mmc_req.data = &mmc_dat;

        while (seg_sz) {
                req_sz = 0;
                sg_cnt = 0;
                sgl = st.sgl;
                /* prep sg table */
                while (pkt_next != (struct sk_buff *)target_list) {
                        pkt_data = pkt_next->data + pkt_offset;
                        sg_data_sz = pkt_next->len - pkt_offset;
                        if (sg_data_sz > sdiodev->max_segment_size)
                                sg_data_sz = sdiodev->max_segment_size;
                        if (sg_data_sz > max_req_sz - req_sz)
                                sg_data_sz = max_req_sz - req_sz;

                        sg_set_buf(sgl, pkt_data, sg_data_sz);

                        sg_cnt++;
                        sgl = sg_next(sgl);
                        req_sz += sg_data_sz;
                        pkt_offset += sg_data_sz;
                        if (pkt_offset == pkt_next->len) {
                                pkt_offset = 0;
                                pkt_next = pkt_next->next;
                        }

                        if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
                                break;
                }
                seg_sz -= sg_cnt;

                if (req_sz % func_blk_sz != 0) {
                        brcmf_err("sg request length %u is not %u aligned\n",
                                  req_sz, func_blk_sz);
                        ret = -ENOTBLK;
                        goto exit;
                }

                mmc_dat.sg_len = sg_cnt;
                mmc_dat.blocks = req_sz / func_blk_sz;
                mmc_cmd.arg |= (addr & 0x1FFFF) << 9;   /* address */
                mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;  /* block count */
                /* incrementing addr for function 1 */
                if (fn == 1)
                        addr += req_sz;

                mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
                mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

                ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
                if (ret != 0) {
                        brcmf_err("CMD53 sg block %s failed %d\n",
                                  write ? "write" : "read", ret);
                        ret = -EIO;
                        break;
                }
        }

        if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
                local_pkt_next = local_list.next;
                orig_offset = 0;
                skb_queue_walk(pktlist, pkt_next) {
                        dst_offset = 0;
                        do {
                                req_sz = local_pkt_next->len - orig_offset;
                                req_sz = min_t(uint, pkt_next->len - dst_offset,
                                               req_sz);
                                orig_data = local_pkt_next->data + orig_offset;
                                dst_data = pkt_next->data + dst_offset;
                                memcpy(dst_data, orig_data, req_sz);
                                orig_offset += req_sz;
                                dst_offset += req_sz;
                                if (orig_offset == local_pkt_next->len) {
                                        orig_offset = 0;
                                        local_pkt_next = local_pkt_next->next;
                                }
                                if (dst_offset == pkt_next->len)
                                        break;
                        } while (!skb_queue_empty(&local_list));
                }
        }

exit:
        sg_free_table(&st);
        while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
                brcmu_pkt_buf_free_skb(pkt_next);

        return ret;
}
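
/*
 * Illustrative note on the CMD53 argument assembled above (bit positions per
 * the standard SD_IO_RW_EXTENDED layout; a sketch, not driver-specific):
 *
 *      bit  31     R/W flag (1 = write)
 *      bits 30:28  function number
 *      bit  27     block mode (always set here)
 *      bit  26     OP code (1 = incrementing address, used for function 1)
 *      bits 25:9   register address
 *      bits 8:0    block count
 *
 * which matches the "(addr & 0x1FFFF) << 9" and "blocks & 0x1FF" packing in
 * the transfer loop.
 */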

int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, u8 *buf, uint nbytes)
{
        struct sk_buff *mypkt;
        int err;

        mypkt = brcmu_pkt_buf_get_skb(nbytes);
        if (!mypkt) {
                brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
                          nbytes);
                return -EIO;
        }

        err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
        if (!err)
                memcpy(buf, mypkt->data, nbytes);

        brcmu_pkt_buf_free_skb(mypkt);
        return err;
}

int
brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, struct sk_buff *pkt)
{
        uint width;
        int err = 0;

        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pkt->len);

        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdio_addrprep(sdiodev, width, &addr);
        if (err)
                goto done;

        err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);

done:
        return err;
}

int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                            uint flags, struct sk_buff_head *pktq, uint totlen)
{
        struct sk_buff *glom_skb;
        struct sk_buff *skb;
        uint width;
        int err = 0;

        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pktq->qlen);

        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdio_addrprep(sdiodev, width, &addr);
        if (err)
                goto done;

        if (pktq->qlen == 1)
                err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
        else if (!sdiodev->sg_support) {
                glom_skb = brcmu_pkt_buf_get_skb(totlen);
                if (!glom_skb)
                        return -ENOMEM;
                err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
                if (err)
                        goto done;

                skb_queue_walk(pktq, skb) {
                        memcpy(skb->data, glom_skb->data, skb->len);
                        skb_pull(glom_skb, skb->len);
                }
        } else
                err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);

done:
        return err;
}

int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, u8 *buf, uint nbytes)
{
        struct sk_buff *mypkt;
        uint width;
        int err;

        mypkt = brcmu_pkt_buf_get_skb(nbytes);
        if (!mypkt) {
                brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
                          nbytes);
                return -EIO;
        }

        memcpy(mypkt->data, buf, nbytes);

        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdio_addrprep(sdiodev, width, &addr);

        if (!err)
                err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);

        brcmu_pkt_buf_free_skb(mypkt);
        return err;
}

int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, struct sk_buff_head *pktq)
{
        struct sk_buff *skb;
        uint width;
        int err;

        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pktq->qlen);

        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdio_addrprep(sdiodev, width, &addr);
        if (err)
                return err;

        if (pktq->qlen == 1 || !sdiodev->sg_support)
                skb_queue_walk(pktq, skb) {
                        err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
                        if (err)
                                break;
                }
        else
                err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);

        return err;
}

int
brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                 u8 *data, uint size)
{
        int bcmerror = 0;
        struct sk_buff *pkt;
        u32 sdaddr;
        uint dsize;

        dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
        pkt = dev_alloc_skb(dsize);
        if (!pkt) {
                brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
                return -EIO;
        }
        pkt->priority = 0;

        /* Determine initial transfer parameters */
        sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
        if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
                dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
        else
                dsize = size;

        sdio_claim_host(sdiodev->func[1]);

        /* Do the transfer(s) */
        while (size) {
                /* Set the backplane window to include the start address */
                bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
                if (bcmerror)
                        break;

                brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
                          write ? "write" : "read", dsize,
                          sdaddr, address & SBSDIO_SBWINDOW_MASK);

                sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
                sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

                skb_put(pkt, dsize);
                if (write)
                        memcpy(pkt->data, data, dsize);
                bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
                                             sdaddr, pkt);
                if (bcmerror) {
                        brcmf_err("membytes transfer failed\n");
                        break;
                }
                if (!write)
                        memcpy(data, pkt->data, dsize);
                skb_trim(pkt, dsize);

                /* Adjust for next transfer (if any) */
                size -= dsize;
                if (size) {
                        data += dsize;
                        address += dsize;
                        sdaddr = 0;
                        dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
                }
        }

        dev_kfree_skb(pkt);

        /* Return the window to backplane enumeration space for core access */
        if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
                brcmf_err("FAILED to set window back to 0x%x\n",
                          sdiodev->sbwad);

        sdio_release_host(sdiodev->func[1]);

        return bcmerror;
}
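
/*
 * Illustrative note (assuming the usual 0x8000-byte window given by
 * SBSDIO_SB_OFT_ADDR_LIMIT): brcmf_sdio_ramrw() never lets a chunk cross a
 * window boundary and re-programs the window before every chunk. As a
 * hypothetical example, a 0x186a0-byte transfer starting 0x1000 into a
 * window is issued as chunks of 0x7000, 0x8000, 0x8000 and 0x16a0 bytes.
 */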

int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
        char t_func = (char)fn;
        brcmf_dbg(SDIO, "Enter\n");

        /* issue abort cmd52 command through F0 */
        brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
                                 SDIO_CCCR_ABORT, &t_func);

        brcmf_dbg(SDIO, "Exit\n");
        return 0;
}
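
/*
 * Note (per the standard CCCR definition): writing the function number into
 * SDIO_CCCR_ABORT asks the card to abort any CMD53 currently in progress on
 * that function, which is the usual recovery step after a data error.
 */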

int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
        u32 regs = 0;
        int ret = 0;

        ret = brcmf_sdioh_attach(sdiodev);
        if (ret)
                goto out;

        regs = SI_ENUM_BASE;

        /* try to attach to the target device */
        sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
        if (!sdiodev->bus) {
                brcmf_err("device attach failed\n");
                ret = -ENODEV;
                goto out;
        }

out:
        if (ret)
                brcmf_sdio_remove(sdiodev);

        return ret;
}
EXPORT_SYMBOL(brcmf_sdio_probe);

int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
{
        sdiodev->bus_if->state = BRCMF_BUS_DOWN;

        if (sdiodev->bus) {
                brcmf_sdbrcm_disconnect(sdiodev->bus);
                sdiodev->bus = NULL;
        }

        brcmf_sdioh_detach(sdiodev);

        sdiodev->sbwad = 0;

        return 0;
}
EXPORT_SYMBOL(brcmf_sdio_remove);

void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
{
        if (enable)
                brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
        else
                brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
}