/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
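
/* Poll the mailbox credit-decrement counter until the target grants a
 * BMI command credit, or the BMI communication timeout expires.
 */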
static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* Read the counter register to get the command credits */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
		/*
		 * Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes have no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = hif_read_write_sync(ar, addr,
					  (u8 *)&ar->bmi.cmd_credits, 4,
					  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
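
/* Wait until the RX lookahead register reports that the target has placed
 * response data in the BMI mailbox FIFO. The timeout is skipped when
 * need_timeout is false (used for BMI_EXECUTE).
 */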
static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar, bool need_timeout)
{
	unsigned long timeout;
	u32 rx_word = 0;
	int ret = 0;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while ((!need_timeout || time_before(jiffies, timeout)) && !rx_word) {
		ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
					  (u8 *)&rx_word, sizeof(rx_word),
					  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= (1 << ENDPOINT1);
	}

	if (!rx_word) {
		ath6kl_err("bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}
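
/* Wait for a command credit, then write a BMI command buffer to the
 * target's HTC mailbox address.
 */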
static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	ret = ath6kl_get_bmi_cmd_credits(ar);
	if (ret)
		return ret;

	addr = ar->mbox_info.htc_addr;

	ret = hif_read_write_sync(ar, addr, buf, len,
				  HIF_WR_SYNC_BYTE_INC);
	if (ret)
		ath6kl_err("unable to send the bmi data to the device\n");

	return ret;
}
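
/* Read a BMI response from the mailbox, optionally waiting (with a timeout)
 * for the lookahead register to signal that data is available. See the
 * synchronization notes below.
 */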
static int ath6kl_bmi_recv_buf(struct ath6kl *ar,
			       u8 *buf, u32 len, bool want_timeout)
{
	int ret;
	u32 addr;

	/*
	 * During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 * CASE 1: length < 4
	 *	Should not happen
	 *
	 * CASE 2: 4 <= length <= 128
	 *	Wait for first 4 bytes to be in FIFO
	 *	If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *	a BMI command credit, which indicates that the ENTIRE
	 *	response is available in the FIFO
	 *
	 * CASE 3: length > 128
	 *	Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	if (len >= 4) { /* NB: Currently, always true */
		ret = ath6kl_bmi_get_rx_lkahd(ar, want_timeout);
		if (ret)
			return ret;
	}

	addr = ar->mbox_info.htc_addr;
	ret = hif_read_write_sync(ar, addr, buf, len,
				  HIF_RD_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}
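
/* Send BMI_DONE to end the BMI phase and release the command buffer.
 * Subsequent calls are no-ops once done_sent is set.
 */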
int ath6kl_bmi_done(struct ath6kl *ar)
{
	int ret;
	u32 cid = BMI_DONE;

	if (ar->bmi.done_sent) {
		ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;

	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send bmi done: %d\n", ret);
		return ret;
	}

	ath6kl_bmi_cleanup(ar);

	return 0;
}
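
/* Issue BMI_GET_TARGET_INFO and read back the target version; if the target
 * reports the extended (sentinel) format, the byte count and the remaining
 * targ_info fields are read as well.
 */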
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
			       struct ath6kl_bmi_target_info *targ_info)
{
	int ret;
	u32 cid = BMI_GET_TARGET_INFO;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send get target info: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
				  sizeof(targ_info->version), true);
	if (ret) {
		ath6kl_err("Unable to recv target info: %d\n", ret);
		return ret;
	}

	if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
		/* Determine how many bytes are in the Target's targ_info */
		ret = ath6kl_bmi_recv_buf(ar,
					  (u8 *)&targ_info->byte_count,
					  sizeof(targ_info->byte_count),
					  true);
		if (ret) {
			ath6kl_err("unable to read target info byte count: %d\n",
				   ret);
			return ret;
		}

		/*
		 * The target's targ_info doesn't match the host's targ_info.
		 * We need to do some backwards compatibility to make this work.
		 */
		if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
			WARN_ON(1);
			return -EINVAL;
		}

		/* Read the remainder of the targ_info */
		ret = ath6kl_bmi_recv_buf(ar,
					  ((u8 *)targ_info) +
					  sizeof(targ_info->byte_count),
					  sizeof(*targ_info) -
					  sizeof(targ_info->byte_count),
					  true);
		if (ret) {
			ath6kl_err("Unable to read target info (%d bytes): %d\n",
				   targ_info->byte_count, ret);
			return ret;
		}
	}

	ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
		   targ_info->version, targ_info->type);

	return 0;
}
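
/* Read 'len' bytes of target memory into 'buf' with BMI_READ_MEMORY,
 * in chunks of at most BMI_DATASZ_MAX bytes.
 */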
int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_READ_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, rx_len;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi read memory: device: addr: 0x%x, len: %d\n",
		   addr, len);

	len_remain = len;

	while (len_remain) {
		rx_len = (len_remain < BMI_DATASZ_MAX) ?
			 len_remain : BMI_DATASZ_MAX;
		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
		offset += sizeof(len);

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len, true);
		if (ret) {
			ath6kl_err("Unable to read from the device: %d\n",
				   ret);
			return ret;
		}
		memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
		len_remain -= rx_len;
		addr += rx_len;
	}

	return 0;
}
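
/* Write 'len' bytes from 'buf' to target memory with BMI_WRITE_MEMORY,
 * in chunks that fit the command buffer; an unaligned final chunk is
 * rounded up to a 4-byte boundary.
 */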
int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_WRITE_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
	u8 aligned_buf[BMI_DATASZ_MAX];
	u8 *src;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi write memory: addr: 0x%x, len: %d\n", addr, len);

	len_remain = len;
	while (len_remain) {
		src = &buf[len - len_remain];

		if (len_remain < (BMI_DATASZ_MAX - header)) {
			if (len_remain & 3) {
				/* align it with 4 bytes */
				len_remain = len_remain +
					     (4 - (len_remain & 3));
				memcpy(aligned_buf, src, len_remain);
				src = aligned_buf;
			}
			tx_len = len_remain;
		} else {
			tx_len = (BMI_DATASZ_MAX - header);
		}

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
		offset += tx_len;

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		len_remain -= tx_len;
		addr += tx_len;
	}

	return 0;
}
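
/* Ask the target to execute code at 'addr' via BMI_EXECUTE; *param is
 * passed as the argument and is overwritten with the target's result.
 */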
int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_EXECUTE;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(param);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d\n",
		   addr, *param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
	offset += sizeof(*param);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), false);
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}

	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}
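
/* Record the application start address on the target via BMI_SET_APP_START. */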
int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_SET_APP_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}
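
/* Read a SoC register via BMI_READ_SOC_REGISTER and return its value
 * in *param.
 */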
int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_READ_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), true);
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}
	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}
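
/* Write 'param' to a SoC register via BMI_WRITE_SOC_REGISTER. */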
int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
{
	u32 cid = BMI_WRITE_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(param);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi write SOC reg: addr: 0x%x, param: %d\n",
		   addr, param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
	offset += sizeof(param);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}
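
/* Stream compressed image data to the target via BMI_LZ_DATA, in chunks
 * that fit the command buffer. The stream must have been opened with
 * ath6kl_bmi_lz_stream_start().
 */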
int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
{
	u32 cid = BMI_LZ_DATA;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(len);
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = BMI_DATASZ_MAX + header;
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d\n",
		   len);

	len_remain = len;
	while (len_remain) {
		tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
			 len_remain : (BMI_DATASZ_MAX - header);

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
		       tx_len);
		offset += tx_len;

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}

		len_remain -= tx_len;
	}

	return 0;
}
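
/* Open a compressed (LZ) download stream at the given target address
 * via BMI_LZ_STREAM_START.
 */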
int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_LZ_STREAM_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi LZ stream start: addr: 0x%x\n",
		   addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to start LZ stream to the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}
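
/* Download a compressed image: stream the 4-byte-aligned portion, send any
 * trailing bytes in a zero-padded word, then restart the stream to flush
 * target caches.
 */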
int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	int ret;
	u32 last_word = 0;
	u32 last_word_offset = len & ~0x3;
	u32 unaligned_bytes = len & 0x3;

	ret = ath6kl_bmi_lz_stream_start(ar, addr);
	if (ret)
		return ret;

	if (unaligned_bytes) {
		/* copy the last word into a zero padded buffer */
		memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
	}

	ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
	if (ret)
		return ret;

	if (unaligned_bytes)
		ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);

	if (!ret) {
		/* Close compressed stream and open a new (fake) one.
		 * This serves mainly to flush Target caches. */
		ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
	}
	return ret;
}
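
/* Allocate the command buffer shared by all BMI operations. */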
int ath6kl_bmi_init(struct ath6kl *ar)
{
	ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);

	if (!ar->bmi.cmd_buf)
		return -ENOMEM;

	return 0;
}

void ath6kl_bmi_cleanup(struct ath6kl *ar)
{
	kfree(ar->bmi.cmd_buf);
	ar->bmi.cmd_buf = NULL;
}