iwl-agn-ucode.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416
  1. /******************************************************************************
  2. *
  3. * GPL LICENSE SUMMARY
  4. *
  5. * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of version 2 of the GNU General Public License as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  19. * USA
  20. *
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * Contact Information:
  25. * Intel Linux Wireless <ilw@linux.intel.com>
  26. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27. *
  28. *****************************************************************************/
  29. #include <linux/kernel.h>
  30. #include <linux/module.h>
  31. #include <linux/init.h>
  32. #include <linux/sched.h>
  33. #include "iwl-dev.h"
  34. #include "iwl-core.h"
  35. #include "iwl-io.h"
  36. #include "iwl-helpers.h"
  37. #include "iwl-agn-hw.h"
  38. #include "iwl-agn.h"
/*
 * Default TX-queue -> TX-FIFO mapping.
 * Queues 0-3 carry the four QoS access categories, queue 4 is the
 * command queue, and the remaining entries are unused in this setup.
 * iwlagn_alive_notify() walks this table to attach queues to FIFOs
 * (BUILD_BUG_ON there pins its size to 10 entries).
 */
static const s8 iwlagn_default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,		/* queue 0: voice */
	IWL_TX_FIFO_VI,		/* queue 1: video */
	IWL_TX_FIFO_BE,		/* queue 2: best effort */
	IWL_TX_FIFO_BK,		/* queue 3: background */
	IWLAGN_CMD_FIFO_NUM,	/* queue 4: host commands */
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
  51. /*
  52. * ucode
  53. */
/*
 * iwlagn_load_section - DMA one uCode section into device SRAM
 * @priv:     driver private data
 * @name:     section name ("INST"/"DATA"), used only in log messages
 * @image:    host-side descriptor (DMA address + length) of the section
 * @dst_addr: destination address in device SRAM
 *
 * Programs the FH service channel with a single transfer buffer and
 * waits up to 5 seconds for the write-complete interrupt.  The register
 * write sequence below is order-sensitive: pause the channel first,
 * program addresses/counts, then enable.
 *
 * Returns 0 on success, -ERESTARTSYS if a signal interrupted the wait,
 * or -ETIMEDOUT if the device never signalled completion.
 */
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
				struct fw_desc *image, u32 dst_addr)
{
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;

	priv->ucode_write_complete = 0;

	/* Pause the DMA channel before reprogramming it */
	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM */
	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	/* Low 32 bits of the host (source) DMA address */
	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* High bits of the DMA address combined with the byte count */
	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* Single transfer buffer, mark the TFD valid */
	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the transfer; interrupt the host at end of TFD */
	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
	/* The ISR sets ucode_write_complete and wakes this queue */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERR(priv, "Could not load the %s uCode section due "
			"to interrupt\n", name);
		return ret;
	}
	/* ret == 0 means the 5 second timeout expired */
	if (!ret) {
		IWL_ERR(priv, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}
	return 0;
}
  98. static int iwlagn_load_given_ucode(struct iwl_priv *priv,
  99. struct fw_desc *inst_image,
  100. struct fw_desc *data_image)
  101. {
  102. int ret = 0;
  103. ret = iwlagn_load_section(priv, "INST", inst_image,
  104. IWLAGN_RTC_INST_LOWER_BOUND);
  105. if (ret)
  106. return ret;
  107. return iwlagn_load_section(priv, "DATA", data_image,
  108. IWLAGN_RTC_DATA_LOWER_BOUND);
  109. }
  110. int iwlagn_load_ucode(struct iwl_priv *priv)
  111. {
  112. int ret = 0;
  113. /* check whether init ucode should be loaded, or rather runtime ucode */
  114. if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
  115. IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
  116. ret = iwlagn_load_given_ucode(priv,
  117. &priv->ucode_init, &priv->ucode_init_data);
  118. if (!ret) {
  119. IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
  120. priv->ucode_type = UCODE_INIT;
  121. }
  122. } else {
  123. IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
  124. "Loading runtime ucode...\n");
  125. ret = iwlagn_load_given_ucode(priv,
  126. &priv->ucode_code, &priv->ucode_data);
  127. if (!ret) {
  128. IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
  129. priv->ucode_type = UCODE_RT;
  130. }
  131. }
  132. return ret;
  133. }
/*
 * IWL_UCODE_GET - generate an accessor for one field of the uCode file
 * header.  API versions <= 2 use the v1 header layout; later versions
 * use v2.  Fields are stored little-endian in the firmware file.
 */
#define IWL_UCODE_GET(item) \
static u32 iwlagn_ucode_get_##item(const struct iwl_ucode_header *ucode,\
				   u32 api_ver)				\
{									\
	if (api_ver <= 2)						\
		return le32_to_cpu(ucode->u.v1.item);			\
	return le32_to_cpu(ucode->u.v2.item);				\
}
  142. static u32 iwlagn_ucode_get_header_size(u32 api_ver)
  143. {
  144. if (api_ver <= 2)
  145. return UCODE_HEADER_SIZE(1);
  146. return UCODE_HEADER_SIZE(2);
  147. }
  148. static u32 iwlagn_ucode_get_build(const struct iwl_ucode_header *ucode,
  149. u32 api_ver)
  150. {
  151. if (api_ver <= 2)
  152. return 0;
  153. return le32_to_cpu(ucode->u.v2.build);
  154. }
  155. static u8 *iwlagn_ucode_get_data(const struct iwl_ucode_header *ucode,
  156. u32 api_ver)
  157. {
  158. if (api_ver <= 2)
  159. return (u8 *) ucode->u.v1.data;
  160. return (u8 *) ucode->u.v2.data;
  161. }
/* Generated accessors for the remaining header size fields. */
IWL_UCODE_GET(inst_size);
IWL_UCODE_GET(data_size);
IWL_UCODE_GET(init_size);
IWL_UCODE_GET(init_data_size);
IWL_UCODE_GET(boot_size);

/* Header-parsing callbacks handed to the shared firmware-load code. */
struct iwl_ucode_ops iwlagn_ucode = {
	.get_header_size = iwlagn_ucode_get_header_size,
	.get_build = iwlagn_ucode_get_build,
	.get_inst_size = iwlagn_ucode_get_inst_size,
	.get_data_size = iwlagn_ucode_get_data_size,
	.get_init_size = iwlagn_ucode_get_init_size,
	.get_init_data_size = iwlagn_ucode_get_init_data_size,
	.get_boot_size = iwlagn_ucode_get_boot_size,
	.get_data = iwlagn_ucode_get_data,
};
  177. /*
  178. * Calibration
  179. */
  180. static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
  181. {
  182. struct iwl_calib_xtal_freq_cmd cmd;
  183. __le16 *xtal_calib =
  184. (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
  185. cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
  186. cmd.hdr.first_group = 0;
  187. cmd.hdr.groups_num = 1;
  188. cmd.hdr.data_valid = 1;
  189. cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
  190. cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
  191. return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
  192. (u8 *)&cmd, sizeof(cmd));
  193. }
  194. static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
  195. {
  196. struct iwl_calib_cfg_cmd calib_cfg_cmd;
  197. struct iwl_host_cmd cmd = {
  198. .id = CALIBRATION_CFG_CMD,
  199. .len = sizeof(struct iwl_calib_cfg_cmd),
  200. .data = &calib_cfg_cmd,
  201. };
  202. memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
  203. calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
  204. calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
  205. calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
  206. calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
  207. return iwl_send_cmd(priv, &cmd);
  208. }
  209. void iwlagn_rx_calib_result(struct iwl_priv *priv,
  210. struct iwl_rx_mem_buffer *rxb)
  211. {
  212. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  213. struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
  214. int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
  215. int index;
  216. /* reduce the size of the length field itself */
  217. len -= 4;
  218. /* Define the order in which the results will be sent to the runtime
  219. * uCode. iwl_send_calib_results sends them in a row according to
  220. * their index. We sort them here
  221. */
  222. switch (hdr->op_code) {
  223. case IWL_PHY_CALIBRATE_DC_CMD:
  224. index = IWL_CALIB_DC;
  225. break;
  226. case IWL_PHY_CALIBRATE_LO_CMD:
  227. index = IWL_CALIB_LO;
  228. break;
  229. case IWL_PHY_CALIBRATE_TX_IQ_CMD:
  230. index = IWL_CALIB_TX_IQ;
  231. break;
  232. case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
  233. index = IWL_CALIB_TX_IQ_PERD;
  234. break;
  235. case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
  236. index = IWL_CALIB_BASE_BAND;
  237. break;
  238. default:
  239. IWL_ERR(priv, "Unknown calibration notification %d\n",
  240. hdr->op_code);
  241. return;
  242. }
  243. iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
  244. }
/*
 * Notification handler: the init uCode has finished all calibrations.
 * Schedule the restart work, which reloads the device with the runtime
 * uCode (the stored calibration results are sent during that bring-up).
 */
void iwlagn_rx_calib_complete(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
	queue_work(priv->workqueue, &priv->restart);
}
  251. void iwlagn_init_alive_start(struct iwl_priv *priv)
  252. {
  253. int ret = 0;
  254. /* Check alive response for "valid" sign from uCode */
  255. if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
  256. /* We had an error bringing up the hardware, so take it
  257. * all the way back down so we can try again */
  258. IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
  259. goto restart;
  260. }
  261. /* initialize uCode was loaded... verify inst image.
  262. * This is a paranoid check, because we would not have gotten the
  263. * "initialize" alive if code weren't properly loaded. */
  264. if (iwl_verify_ucode(priv)) {
  265. /* Runtime instruction load was bad;
  266. * take it all the way back down so we can try again */
  267. IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
  268. goto restart;
  269. }
  270. ret = priv->cfg->ops->lib->alive_notify(priv);
  271. if (ret) {
  272. IWL_WARN(priv,
  273. "Could not complete ALIVE transition: %d\n", ret);
  274. goto restart;
  275. }
  276. iwlagn_send_calib_cfg(priv);
  277. return;
  278. restart:
  279. /* real restart (first load init_ucode) */
  280. queue_work(priv->workqueue, &priv->restart);
  281. }
/*
 * iwlagn_alive_notify - set up the TX scheduler and DMA once the
 * runtime uCode is alive.
 *
 * Clears the scheduler context area in device SRAM, points the
 * scheduler at the byte-count tables, enables the TX DMA channels,
 * initializes every TX queue, and maps the default queues onto their
 * FIFOs.  The whole device-programming sequence runs under priv->lock
 * with interrupts disabled; the register write order matters.
 *
 * Always returns 0 (non-void to match the lib->alive_notify signature).
 */
int iwlagn_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Zero the scheduler SRAM: context data, TX status bitmaps, and
	 * the translation table up to the highest supported queue. */
	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* Tell the scheduler where the byte-count tables live; the
	 * register takes the DMA address in 1K units (hence >> 10). */
	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Chain-mode for all queues; no aggregation selected yet */
	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		/* Reset read/write pointers and clear queue context... */
		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		/* ...then program window size and frame limit */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* map qos queues to fifos one-to-one */
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);

	for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
		int ac = iwlagn_default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		/* Unused queues are activated but not attached to a FIFO */
		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_send_wimax_coex(priv);

	/* Send crystal calibration and any stored init-uCode results */
	iwlagn_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}