/* iwl-test.c */
  1. /******************************************************************************
  2. *
  3. * This file is provided under a dual BSD/GPLv2 license. When using or
  4. * redistributing this file, you may do so under either license.
  5. *
  6. * GPL LICENSE SUMMARY
  7. *
  8. * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of version 2 of the GNU General Public License as
  12. * published by the Free Software Foundation.
  13. *
  14. * This program is distributed in the hope that it will be useful, but
  15. * WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  22. * USA
  23. *
  24. * The full GNU General Public License is included in this distribution
  25. * in the file called LICENSE.GPL.
  26. *
  27. * Contact Information:
  28. * Intel Linux Wireless <ilw@linux.intel.com>
  29. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  30. *
  31. * BSD LICENSE
  32. *
  33. * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
  34. * All rights reserved.
  35. *
  36. * Redistribution and use in source and binary forms, with or without
  37. * modification, are permitted provided that the following conditions
  38. * are met:
  39. *
  40. * * Redistributions of source code must retain the above copyright
  41. * notice, this list of conditions and the following disclaimer.
  42. * * Redistributions in binary form must reproduce the above copyright
  43. * notice, this list of conditions and the following disclaimer in
  44. * the documentation and/or other materials provided with the
  45. * distribution.
  46. * * Neither the name Intel Corporation nor the names of its
  47. * contributors may be used to endorse or promote products derived
  48. * from this software without specific prior written permission.
  49. *
  50. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  51. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  52. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  53. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  54. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  55. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  56. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  57. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  58. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  59. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  60. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  61. *
  62. *****************************************************************************/
  63. #include <linux/export.h>
  64. #include <net/netlink.h>
  65. #include "iwl-io.h"
  66. #include "iwl-fh.h"
  67. #include "iwl-prph.h"
  68. #include "iwl-trans.h"
  69. #include "iwl-test.h"
  70. #include "iwl-csr.h"
  71. #include "iwl-testmode.h"
/*
 * Periphery registers absolute lower bound. This is used in order to
 * differentiate registry access through HBUS_TARG_PRPH_* and
 * HBUS_TARG_MEM_* accesses.
 */
#define IWL_ABS_PRPH_START (0xA00000)
/*
 * The TLVs used in the gnl message policy between the kernel module and
 * user space application. iwl_testmode_gnl_msg_policy is to be carried
 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
 * See iwl-testmode.h
 *
 * Note: the array is indexed by IWL_TM_ATTR_* values and passed to
 * nla_parse() with maxtype IWL_TM_ATTR_MAX - 1 (see iwl_test_parse()).
 */
static
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
	[IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },

	/* command id is a u8 — read with nla_get_u8() in iwl_test_fw_cmd() */
	[IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
	[IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },

	[IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
	[IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
	[IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },

	[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },

	[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },

	[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
	[IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },

	[IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
	[IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },

	[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
	[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
	[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },

	[IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
};
  110. static inline void iwl_test_trace_clear(struct iwl_test *tst)
  111. {
  112. memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
  113. }
  114. static void iwl_test_trace_stop(struct iwl_test *tst)
  115. {
  116. if (!tst->trace.enabled)
  117. return;
  118. if (tst->trace.cpu_addr && tst->trace.dma_addr)
  119. dma_free_coherent(tst->trans->dev,
  120. tst->trace.tsize,
  121. tst->trace.cpu_addr,
  122. tst->trace.dma_addr);
  123. iwl_test_trace_clear(tst);
  124. }
  125. static inline void iwl_test_mem_clear(struct iwl_test *tst)
  126. {
  127. memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
  128. }
  129. static inline void iwl_test_mem_stop(struct iwl_test *tst)
  130. {
  131. if (!tst->mem.in_read)
  132. return;
  133. iwl_test_mem_clear(tst);
  134. }
  135. /*
  136. * Initializes the test object
  137. * During the lifetime of the test object it is assumed that the transport is
  138. * started. The test object should be stopped before the transport is stopped.
  139. */
  140. void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
  141. struct iwl_test_ops *ops)
  142. {
  143. tst->trans = trans;
  144. tst->ops = ops;
  145. iwl_test_trace_clear(tst);
  146. iwl_test_mem_clear(tst);
  147. }
  148. EXPORT_SYMBOL_GPL(iwl_test_init);
  149. /*
  150. * Stop the test object
  151. */
  152. void iwl_test_free(struct iwl_test *tst)
  153. {
  154. iwl_test_mem_stop(tst);
  155. iwl_test_trace_stop(tst);
  156. }
  157. EXPORT_SYMBOL_GPL(iwl_test_free);
  158. static inline int iwl_test_send_cmd(struct iwl_test *tst,
  159. struct iwl_host_cmd *cmd)
  160. {
  161. return tst->ops->send_cmd(tst->trans->op_mode, cmd);
  162. }
  163. static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
  164. {
  165. return tst->ops->valid_hw_addr(addr);
  166. }
  167. static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
  168. {
  169. return tst->ops->get_fw_ver(tst->trans->op_mode);
  170. }
  171. static inline struct sk_buff*
  172. iwl_test_alloc_reply(struct iwl_test *tst, int len)
  173. {
  174. return tst->ops->alloc_reply(tst->trans->op_mode, len);
  175. }
  176. static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
  177. {
  178. return tst->ops->reply(tst->trans->op_mode, skb);
  179. }
  180. static inline struct sk_buff*
  181. iwl_test_alloc_event(struct iwl_test *tst, int len)
  182. {
  183. return tst->ops->alloc_event(tst->trans->op_mode, len);
  184. }
  185. static inline void
  186. iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
  187. {
  188. return tst->ops->event(tst->trans->op_mode, skb);
  189. }
  190. /*
  191. * This function handles the user application commands to the fw. The fw
  192. * commands are sent in a synchronuous manner. In case that the user requested
  193. * to get commands response, it is send to the user.
  194. */
  195. static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
  196. {
  197. struct iwl_host_cmd cmd;
  198. struct iwl_rx_packet *pkt;
  199. struct sk_buff *skb;
  200. void *reply_buf;
  201. u32 reply_len;
  202. int ret;
  203. bool cmd_want_skb;
  204. memset(&cmd, 0, sizeof(struct iwl_host_cmd));
  205. if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
  206. !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
  207. IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
  208. return -ENOMSG;
  209. }
  210. cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
  211. cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
  212. if (cmd_want_skb)
  213. cmd.flags |= CMD_WANT_SKB;
  214. cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
  215. cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
  216. cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
  217. cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
  218. IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
  219. cmd.id, cmd.flags, cmd.len[0]);
  220. ret = iwl_test_send_cmd(tst, &cmd);
  221. if (ret) {
  222. IWL_ERR(tst->trans, "Failed to send hcmd\n");
  223. return ret;
  224. }
  225. if (!cmd_want_skb)
  226. return ret;
  227. /* Handling return of SKB to the user */
  228. pkt = cmd.resp_pkt;
  229. if (!pkt) {
  230. IWL_ERR(tst->trans, "HCMD received a null response packet\n");
  231. return ret;
  232. }
  233. reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
  234. skb = iwl_test_alloc_reply(tst, reply_len + 20);
  235. reply_buf = kmalloc(reply_len, GFP_KERNEL);
  236. if (!skb || !reply_buf) {
  237. kfree_skb(skb);
  238. kfree(reply_buf);
  239. return -ENOMEM;
  240. }
  241. /* The reply is in a page, that we cannot send to user space. */
  242. memcpy(reply_buf, &(pkt->hdr), reply_len);
  243. iwl_free_resp(&cmd);
  244. if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
  245. IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
  246. nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
  247. goto nla_put_failure;
  248. return iwl_test_reply(tst, skb);
  249. nla_put_failure:
  250. IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
  251. kfree(reply_buf);
  252. kfree_skb(skb);
  253. return -ENOMSG;
  254. }
/*
 * Handles the user application commands for register access.
 *
 * Expects IWL_TM_ATTR_REG_OFFSET; the write commands additionally require
 * the matching IWL_TM_ATTR_REG_VALUE8/VALUE32 attribute.  A 32-bit read
 * result is sent back to user space as IWL_TM_ATTR_REG_VALUE32.
 * Returns 0 (or the reply status) on success, negative errno on failure.
 */
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
	u32 ofs, val32, cmd;
	u8 val8;
	struct sk_buff *skb;
	int status = 0;
	struct iwl_trans *trans = tst->trans;

	if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
		IWL_ERR(trans, "Missing reg offset\n");
		return -ENOMSG;
	}

	ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
	IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);

	cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);

	/*
	 * Allow access only to FH/CSR/HBUS in direct mode.
	 * Since we don't have the upper bounds for the CSR and HBUS segments,
	 * we will use only the upper bound of FH for sanity check.
	 */
	if (ofs >= FH_MEM_UPPER_BOUND) {
		IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
			FH_MEM_UPPER_BOUND);
		return -EINVAL;
	}

	switch (cmd) {
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
		val32 = iwl_read_direct32(tst->trans, ofs);
		IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);

		/* presumably +20-byte style headroom for netlink headers — TODO confirm */
		skb = iwl_test_alloc_reply(tst, 20);
		if (!skb) {
			IWL_ERR(trans, "Memory allocation fail\n");
			return -ENOMEM;
		}
		if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
			goto nla_put_failure;
		/* a failed reply is only logged; status is still returned */
		status = iwl_test_reply(tst, skb);
		if (status < 0)
			IWL_ERR(trans, "Error sending msg : %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
		if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
			IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
			iwl_write_direct32(tst->trans, ofs, val32);
		}
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
			IWL_ERR(trans, "Missing value to write\n");
			return -ENOMSG;
		} else {
			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
			IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
			iwl_write8(tst->trans, ofs, val8);
		}
		break;

	default:
		IWL_ERR(trans, "Unknown test register cmd ID\n");
		return -ENOMSG;
	}

	return status;

nla_put_failure:
	/* only reachable from the READ32 case, where skb is valid */
	kfree_skb(skb);
	return -EMSGSIZE;
}
/*
 * Handles the request to start FW tracing. Allocates the trace buffer
 * and sends a reply to user space with the DMA address of the allocated
 * buffer.
 *
 * Returns -EBUSY if tracing is already active, -EINVAL for a bad size,
 * -ENOMEM/-EMSGSIZE on allocation or netlink failures.
 */
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
	struct sk_buff *skb;
	int status = 0;

	if (tst->trace.enabled)
		return -EBUSY;

	/* size is optional; fall back to the compiled-in default */
	if (!tb[IWL_TM_ATTR_TRACE_SIZE])
		tst->trace.size = TRACE_BUFF_SIZE_DEF;
	else
		tst->trace.size =
			nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);

	if (!tst->trace.size)
		return -EINVAL;

	if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
	    tst->trace.size > TRACE_BUFF_SIZE_MAX)
		return -EINVAL;

	/* over-allocate so the usable region can be aligned below */
	tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
	tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
						 tst->trace.tsize,
						 &tst->trace.dma_addr,
						 GFP_KERNEL);
	if (!tst->trace.cpu_addr)
		return -ENOMEM;

	tst->trace.enabled = true;
	/* align the usable window to a 0x100 boundary within the allocation */
	tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);

	/* 0x3B fill — presumably a recognizable poison pattern; TODO confirm */
	memset(tst->trace.trace_addr, 0x03B, tst->trace.size);

	skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
	if (!skb) {
		IWL_ERR(tst->trans, "Memory allocation fail\n");
		iwl_test_trace_stop(tst);
		return -ENOMEM;
	}

	if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
		    sizeof(tst->trace.dma_addr),
		    (u64 *)&tst->trace.dma_addr))
		goto nla_put_failure;

	/* reply failure is only logged; the negative status is still returned */
	status = iwl_test_reply(tst, skb);
	if (status < 0)
		IWL_ERR(tst->trans, "Error sending msg : %d\n", status);

	tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
					  DUMP_CHUNK_SIZE);
	return status;

nla_put_failure:
	kfree_skb(skb);
	/* only undo the allocation when this call itself started the trace */
	if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
	    IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
		iwl_test_trace_stop(tst);
	return -EMSGSIZE;
}
/*
 * Handles indirect read from the periphery or the SRAM. The read is performed
 * to a temporary buffer. The user space application should later issue a dump
 * (IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP) to fetch the data in chunks.
 *
 * @addr: absolute device address; @size: byte count, must be a multiple of 4.
 */
static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
{
	struct iwl_trans *trans = tst->trans;
	unsigned long flags;
	int i;

	if (size & 0x3)
		return -EINVAL;

	tst->mem.size = size;
	tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
	if (tst->mem.addr == NULL)
		return -ENOMEM;

	/* Hard-coded periphery absolute address */
	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		spin_lock_irqsave(&trans->reg_lock, flags);
		/* NOTE(review): iwl_grab_nic_access() result is ignored — verify */
		iwl_grab_nic_access(trans);
		/* (3 << 24) — presumably the auto-increment read mode; TODO confirm */
		iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
			    addr | (3 << 24));
		for (i = 0; i < size; i += 4)
			*(u32 *)(tst->mem.addr + i) =
				iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
		iwl_release_nic_access(trans);
		spin_unlock_irqrestore(&trans->reg_lock, flags);
	} else { /* target memory (SRAM) */
		_iwl_read_targ_mem_dwords(trans, addr,
					  tst->mem.addr,
					  tst->mem.size / 4);
	}

	/* arm the chunked dump state consumed by iwl_test_buffer_dump() */
	tst->mem.nchunks =
		DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
	tst->mem.in_read = true;
	return 0;
}
/*
 * Handles indirect write to the periphery or SRAM. The write is performed
 * from a temporary buffer.
 *
 * Periphery writes of 1-3 bytes go through a single HBUS write; longer
 * writes must be dword-aligned. SRAM targets are validated by the op_mode
 * (iwl_test_valid_hw_addr). Returns 0 on success, -EINVAL otherwise.
 */
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
				   u32 size, unsigned char *buf)
{
	struct iwl_trans *trans = tst->trans;
	u32 val, i;
	unsigned long flags;

	if (IWL_ABS_PRPH_START <= addr &&
	    addr < IWL_ABS_PRPH_START + PRPH_END) {
		/* Periphery writes can be 1-3 bytes long, or DWORDs */
		if (size < 4) {
			memcpy(&val, buf, size);
			spin_lock_irqsave(&trans->reg_lock, flags);
			/* NOTE(review): iwl_grab_nic_access() result ignored — verify */
			iwl_grab_nic_access(trans);
			/* byte count is encoded as (size - 1) in bits 24+ */
			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
				    (addr & 0x0000FFFF) |
				    ((size - 1) << 24));
			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
			iwl_release_nic_access(trans);
			/* needed after consecutive writes w/o read */
			mmiowb();
			spin_unlock_irqrestore(&trans->reg_lock, flags);
		} else {
			if (size % 4)
				return -EINVAL;
			for (i = 0; i < size; i += 4)
				iwl_write_prph(trans, addr+i,
					       *(u32 *)(buf+i));
		}
	} else if (iwl_test_valid_hw_addr(tst, addr)) {
		_iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
	} else {
		return -EINVAL;
	}
	return 0;
}
  455. /*
  456. * Handles the user application commands for indirect read/write
  457. * to/from the periphery or the SRAM.
  458. */
  459. static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
  460. {
  461. u32 addr, size, cmd;
  462. unsigned char *buf;
  463. /* Both read and write should be blocked, for atomicity */
  464. if (tst->mem.in_read)
  465. return -EBUSY;
  466. cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
  467. if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
  468. IWL_ERR(tst->trans, "Error finding memory offset address\n");
  469. return -ENOMSG;
  470. }
  471. addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
  472. if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
  473. IWL_ERR(tst->trans, "Error finding size for memory reading\n");
  474. return -ENOMSG;
  475. }
  476. size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
  477. if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
  478. return iwl_test_indirect_read(tst, addr, size);
  479. } else {
  480. if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
  481. return -EINVAL;
  482. buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
  483. return iwl_test_indirect_write(tst, addr, size, buf);
  484. }
  485. }
  486. /*
  487. * Enable notifications to user space
  488. */
  489. static int iwl_test_notifications(struct iwl_test *tst,
  490. struct nlattr **tb)
  491. {
  492. tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
  493. return 0;
  494. }
  495. /*
  496. * Handles the request to get the device id
  497. */
  498. static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
  499. {
  500. u32 devid = tst->trans->hw_id;
  501. struct sk_buff *skb;
  502. int status;
  503. IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
  504. skb = iwl_test_alloc_reply(tst, 20);
  505. if (!skb) {
  506. IWL_ERR(tst->trans, "Memory allocation fail\n");
  507. return -ENOMEM;
  508. }
  509. if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
  510. goto nla_put_failure;
  511. status = iwl_test_reply(tst, skb);
  512. if (status < 0)
  513. IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
  514. return 0;
  515. nla_put_failure:
  516. kfree_skb(skb);
  517. return -EMSGSIZE;
  518. }
  519. /*
  520. * Handles the request to get the FW version
  521. */
  522. static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
  523. {
  524. struct sk_buff *skb;
  525. int status;
  526. u32 ver = iwl_test_fw_ver(tst);
  527. IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
  528. skb = iwl_test_alloc_reply(tst, 20);
  529. if (!skb) {
  530. IWL_ERR(tst->trans, "Memory allocation fail\n");
  531. return -ENOMEM;
  532. }
  533. if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
  534. goto nla_put_failure;
  535. status = iwl_test_reply(tst, skb);
  536. if (status < 0)
  537. IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
  538. return 0;
  539. nla_put_failure:
  540. kfree_skb(skb);
  541. return -EMSGSIZE;
  542. }
  543. /*
  544. * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
  545. */
  546. int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
  547. void *data, int len)
  548. {
  549. int result;
  550. result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
  551. iwl_testmode_gnl_msg_policy);
  552. if (result) {
  553. IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
  554. return result;
  555. }
  556. /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
  557. if (!tb[IWL_TM_ATTR_COMMAND]) {
  558. IWL_ERR(tst->trans, "Missing testmode command type\n");
  559. return -ENOMSG;
  560. }
  561. return 0;
  562. }
  563. EXPORT_SYMBOL_GPL(iwl_test_parse);
/*
 * Handle test commands.
 * Returns 1 for unknown commands (not handled by the test object); negative
 * value in case of error.
 */
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
{
	int result;

	/* tb was validated by iwl_test_parse(), so COMMAND is present */
	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_UCODE:
		IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
		result = iwl_test_fw_cmd(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
		IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
		result = iwl_test_reg(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
		IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
		result = iwl_test_trace_begin(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_END_TRACE:
		iwl_test_trace_stop(tst);
		result = 0;
		break;

	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
	case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
		IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
		result = iwl_test_indirect_mem(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
		IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
		result = iwl_test_notifications(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
		IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
		result = iwl_test_get_fw_ver(tst, tb);
		break;

	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
		IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
		result = iwl_test_get_dev_id(tst, tb);
		break;

	default:
		/* not an error: the caller may handle the command itself */
		IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
		result = 1;
		break;
	}
	return result;
}
EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
/*
 * Dump one chunk of the fw trace buffer into a netlink dump reply.
 * The chunk cursor lives in cb->args[4]; -ENOENT signals end of dump.
 */
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	int idx, length;

	if (!tst->trace.enabled || !tst->trace.trace_addr)
		return -EFAULT;

	idx = cb->args[4];
	if (idx >= tst->trace.nchunks)
		return -ENOENT;

	/* the final chunk may be shorter than DUMP_CHUNK_SIZE */
	length = DUMP_CHUNK_SIZE;
	if (((idx + 1) == tst->trace.nchunks) &&
	    (tst->trace.size % DUMP_CHUNK_SIZE))
		length = tst->trace.size %
			DUMP_CHUNK_SIZE;

	if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
		    tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
		goto nla_put_failure;

	/* advance the cursor for the next dump callback */
	cb->args[4] = ++idx;
	return 0;

nla_put_failure:
	return -ENOBUFS;
}
  638. static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
  639. struct netlink_callback *cb)
  640. {
  641. int idx, length;
  642. if (!tst->mem.in_read)
  643. return -EFAULT;
  644. idx = cb->args[4];
  645. if (idx >= tst->mem.nchunks) {
  646. iwl_test_mem_stop(tst);
  647. return -ENOENT;
  648. }
  649. length = DUMP_CHUNK_SIZE;
  650. if (((idx + 1) == tst->mem.nchunks) &&
  651. (tst->mem.size % DUMP_CHUNK_SIZE))
  652. length = tst->mem.size % DUMP_CHUNK_SIZE;
  653. if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
  654. tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
  655. goto nla_put_failure;
  656. cb->args[4] = ++idx;
  657. return 0;
  658. nla_put_failure:
  659. return -ENOBUFS;
  660. }
  661. /*
  662. * Handle dump commands.
  663. * Returns 1 for unknown commands (not handled by the test object); negative
  664. * value in case of error.
  665. */
  666. int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
  667. struct netlink_callback *cb)
  668. {
  669. int result;
  670. switch (cmd) {
  671. case IWL_TM_CMD_APP2DEV_READ_TRACE:
  672. IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
  673. result = iwl_test_trace_dump(tst, skb, cb);
  674. break;
  675. case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
  676. IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
  677. result = iwl_test_buffer_dump(tst, skb, cb);
  678. break;
  679. default:
  680. result = 1;
  681. break;
  682. }
  683. return result;
  684. }
  685. EXPORT_SYMBOL_GPL(iwl_test_dump);
/*
 * Multicast spontaneous messages from the device to the user space.
 */
static void iwl_test_send_rx(struct iwl_test *tst,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct sk_buff *skb;
	struct iwl_rx_packet *data;
	int length;

	data = rxb_addr(rxb);
	length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	/* the length doesn't include len_n_flags field, so add it manually */
	length += sizeof(__le32);

	/* presumably +20 leaves headroom for the netlink headers — TODO confirm */
	skb = iwl_test_alloc_event(tst, length + 20);
	if (skb == NULL) {
		IWL_ERR(tst->trans, "Out of memory for message to user\n");
		return;
	}

	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
			IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
		goto nla_put_failure;

	/* ownership of skb passes to the op_mode event hook */
	iwl_test_event(tst, skb);
	return;

nla_put_failure:
	kfree_skb(skb);
	IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
}
  714. /*
  715. * Called whenever a Rx frames is recevied from the device. If notifications to
  716. * the user space are requested, sends the frames to the user.
  717. */
  718. void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
  719. {
  720. if (tst->notify)
  721. iwl_test_send_rx(tst, rxb);
  722. }
  723. EXPORT_SYMBOL_GPL(iwl_test_rx);