iwl-agn-ucode.c

/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
static const s8 iwlagn_default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWLAGN_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};

static const s8 iwlagn_ipan_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL_TX_FIFO_BK_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWLAGN_CMD_FIFO_NUM,
};
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
	 0, COEX_UNASSOC_IDLE_FLAGS},
	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
	 0, COEX_CALIBRATION_FLAGS},
	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
	 0, COEX_CONNECTION_ESTAB_FLAGS},
	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
	 0, COEX_ASSOCIATED_IDLE_FLAGS},
	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};
/*
 * ucode
 */
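/*
 * Load one uCode section (instruction or data) into device SRAM over the
 * FH service DMA channel, then wait up to five seconds for the
 * "write complete" interrupt from the device.
 */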
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
				struct fw_desc *image, u32 dst_addr)
{
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;

	priv->ucode_write_complete = 0;

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERR(priv, "Could not load the %s uCode section due "
			"to interrupt\n", name);
		return ret;
	}
	if (!ret) {
		IWL_ERR(priv, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}

	return 0;
}
static int iwlagn_load_given_ucode(struct iwl_priv *priv,
				   struct fw_desc *inst_image,
				   struct fw_desc *data_image)
{
	int ret = 0;

	ret = iwlagn_load_section(priv, "INST", inst_image,
				  IWLAGN_RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;

	return iwlagn_load_section(priv, "DATA", data_image,
				   IWLAGN_RTC_DATA_LOWER_BOUND);
}
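/*
 * Pick which image pair to load: the init uCode (if present and nothing has
 * been loaded yet) or the runtime uCode, and record which one is now active.
 */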
int iwlagn_load_ucode(struct iwl_priv *priv)
{
	int ret = 0;

	/* check whether init ucode should be loaded, or rather runtime ucode */
	if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
		IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
		ret = iwlagn_load_given_ucode(priv,
			&priv->ucode_init, &priv->ucode_init_data);
		if (!ret) {
			IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
			priv->ucode_type = UCODE_INIT;
		}
	} else {
		IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
			"Loading runtime ucode...\n");
		ret = iwlagn_load_given_ucode(priv,
			&priv->ucode_code, &priv->ucode_data);
		if (!ret) {
			IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
			priv->ucode_type = UCODE_RT;
		}
	}

	return ret;
}
/*
 * Calibration
 */
static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
{
	struct iwl_calib_xtal_freq_cmd cmd;
	__le16 *xtal_calib =
		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);

	cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	cmd.hdr.first_group = 0;
	cmd.hdr.groups_num = 1;
	cmd.hdr.data_valid = 1;
	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
			     (u8 *)&cmd, sizeof(cmd));
}
static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
{
	struct iwl_calib_temperature_offset_cmd cmd;
	__le16 *offset_calib =
		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);

	cmd.hdr.op_code = IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD;
	cmd.hdr.first_group = 0;
	cmd.hdr.groups_num = 1;
	cmd.hdr.data_valid = 1;
	cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]);
	if (!(cmd.radio_sensor_offset))
		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
	cmd.reserved = 0;
	IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
			cmd.radio_sensor_offset);
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
			     (u8 *)&cmd, sizeof(cmd));
}
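/* Tell the init uCode to run all calibrations and send back their results. */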
static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
{
	struct iwl_calib_cfg_cmd calib_cfg_cmd;
	struct iwl_host_cmd cmd = {
		.id = CALIBRATION_CFG_CMD,
		.len = sizeof(struct iwl_calib_cfg_cmd),
		.data = &calib_cfg_cmd,
	};

	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;

	return iwl_send_cmd(priv, &cmd);
}
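/*
 * Store a calibration result notification from the init uCode so that
 * iwl_send_calib_results() can later replay it to the runtime uCode.
 */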
void iwlagn_rx_calib_result(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	int index;

	/* reduce the size of the length field itself */
	len -= 4;

	/* Define the order in which the results will be sent to the runtime
	 * uCode. iwl_send_calib_results sends them in a row according to
	 * their index. We sort them here
	 */
	switch (hdr->op_code) {
	case IWL_PHY_CALIBRATE_DC_CMD:
		index = IWL_CALIB_DC;
		break;
	case IWL_PHY_CALIBRATE_LO_CMD:
		index = IWL_CALIB_LO;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_CMD:
		index = IWL_CALIB_TX_IQ;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
		index = IWL_CALIB_TX_IQ_PERD;
		break;
	case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
		index = IWL_CALIB_BASE_BAND;
		break;
	default:
		IWL_ERR(priv, "Unknown calibration notification %d\n",
			hdr->op_code);
		return;
	}
	iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
}
void iwlagn_rx_calib_complete(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
	queue_work(priv->workqueue, &priv->restart);
}
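/*
 * Handle the "initialize" uCode ALIVE notification: verify the loaded image,
 * let the device-specific code finish the ALIVE transition and kick off the
 * init calibrations.
 */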
void iwlagn_init_alive_start(struct iwl_priv *priv)
{
	int ret = 0;

	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
		goto restart;
	}

	/* initialize uCode was loaded... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded. */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	ret = priv->cfg->ops->lib->alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition: %d\n", ret);
		goto restart;
	}

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/*
		 * Tell the uCode we are ready to perform calibration.
		 * This must happen before any calibration; there is no need
		 * to close the envelope since we are going to load the
		 * runtime uCode later.
		 */
		iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
				   BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
	}
	iwlagn_send_calib_cfg(priv);

	/*
	 * The temperature offset calibration is only needed for the runtime
	 * uCode, so prepare the value now.
	 */
	if (priv->cfg->need_temp_offset_calib)
		iwlagn_set_temperature_offset_calib(priv);

	return;

restart:
	/* real restart (first load init_ucode) */
	queue_work(priv->workqueue, &priv->restart);
}
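/*
 * Send the WiMAX coexistence priority table, or an all-zero command when
 * WiMAX coexistence is not supported by this device.
 */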
static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
{
	struct iwl_wimax_coex_cmd coex_cmd;

	if (priv->cfg->base_params->support_wimax_coexist) {
		/* UnMask wake up src at associated sleep */
		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;

		/* UnMask wake up src at unassociated sleep */
		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
		memcpy(coex_cmd.sta_prio, cu_priorities,
		       sizeof(struct iwl_wimax_coex_event_entry) *
		       COEX_NUM_OF_EVENTS);

		/* enabling the coexistence feature */
		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;

		/* enabling the priorities tables */
		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
	} else {
		/* coexistence is disabled */
		memset(&coex_cmd, 0, sizeof(coex_cmd));
	}
	return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
				sizeof(coex_cmd), &coex_cmd);
}
static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
	0, 0, 0, 0, 0, 0, 0
};
void iwlagn_send_prio_tbl(struct iwl_priv *priv)
{
	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;

	memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
		sizeof(iwlagn_bt_prio_tbl));
	if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE,
				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
		IWL_ERR(priv, "failed to send BT prio tbl command\n");
}

void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
{
	struct iwl_bt_coex_prot_env_cmd env_cmd;

	env_cmd.action = action;
	env_cmd.type = type;
	if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
			     sizeof(env_cmd), &env_cmd))
		IWL_ERR(priv, "failed to send BT env command\n");
}
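/*
 * Runtime ALIVE handling: set up the Tx scheduler SRAM, enable the Tx DMA
 * channels, map Tx queues to FIFOs and send the coexistence and calibration
 * set-up commands.
 */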
int iwlagn_alive_notify(struct iwl_priv *priv)
{
	const s8 *queues;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
			IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queues = iwlagn_ipan_queue_to_tx_fifo;
	else
		queues = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int ac = queues[i];

		iwl_txq_ctx_activate(priv, i);

		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	iwlagn_send_wimax_coex(priv);

	iwlagn_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}
/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 *   using sample data 100 bytes apart.  If these sample points are good,
 *   it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWLAGN_RTC_INST_LOWER_BOUND);
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
/**
 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
 *     looking at all data.
 */
static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
				u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWLAGN_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		IWL_DEBUG_INFO(priv,
			"ucode image in INSTRUCTION memory is good\n");

	return ret;
}
/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 *    and verify its contents
 */
int iwl_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl_verify_inst_full(priv, image, len);

	return ret;
}