trans.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"

static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
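/* Vendor-specific retry-timeout register in PCI config space; it is
 * cleared in iwl_trans_pcie_alloc() below so PCI Tx retries don't
 * interfere with C3 CPU state. */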
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)
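/* Timeout for the NIC-ready poll below; the other iwl_poll_bit() timeouts
 * in this file appear to be in usec (cf. the "100 usec" master-disable
 * warning above). */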
/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);
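
	/* re-check readiness every 200-1000 usec, for a nominal total
	 * of 150 ms (actual sleeps may run longer) */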
	do {
		ret = iwl_pcie_set_hw_ready(trans);
		if (ret >= 0)
			return 0;

		usleep_range(200, 1000);
		t += 200;
	} while (t < 150000);

	return ret;
}

/*
 * ucode
 */
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
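
	/* completion is signalled through ucode_write_waitq (presumably
	 * from the interrupt path once the DMA finishes); give it up to
	 * 5 seconds */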
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
	if (!v_addr)
		return -ENOMEM;
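
	/* copy the section through this page-sized bounce buffer,
	 * one chunk at a time */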
	for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
		u32 copy_size;

		copy_size = min_t(u32, PAGE_SIZE, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans,
						   section->offset + offset,
						   p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int i, ret = 0;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].data)
			break;

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again here.
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	clear_bit(STATUS_RFKILL, &trans_pcie->status);
}

static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}

static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_disable_ict(trans);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
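
/* "Periphery" (PRPH) registers are reached indirectly, through the HBUS
 * target address/data registers used by the two helpers below. */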
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x0000FFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
{
	int ret;

	lockdep_assert_held(&trans->reg_lock);

	/* this bit wakes up the NIC */
	__iwl_set_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			return false;
		}
	}

	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
	lockdep_assert_held(&trans->reg_lock);
	__iwl_clear_bit(trans, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
}

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (iwl_trans_grab_nic_access(trans, false)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans);
	} else {
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (iwl_trans_grab_nic_access(trans, false)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
		iwl_trans_release_nic_access(trans);
	} else {
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);
	return ret;
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "failed to flush all Tx FIFO queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
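		/* roughly 48 chars per formatted register line, plus a
		 * header */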
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_pcie_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();

	return count;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access
};
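
/*
 * Usage (sketch): op-mode code does not call these handlers directly;
 * it goes through the iwl_trans_*() wrappers (the pattern visible in
 * iwl_trans_grab_nic_access()/iwl_trans_release_nic_access() above),
 * after obtaining a transport from iwl_trans_pcie_alloc() below, e.g.:
 *
 *	trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
 *	iwl_trans_configure(trans, &trans_cfg);
 *	iwl_trans_start_hw(trans);
 *
 * The exact wrapper names are assumed from the ops above, not defined
 * in this file.
 */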
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans)
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);
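
	/* prefer a 36-bit DMA mask, falling back to 32-bit if unavailable */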
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
		     iwl_pcie_tasklet, (unsigned long)trans);

	if (iwl_pcie_alloc_ict(trans))
		goto out_free_cmd_pool;

	err = request_irq(pdev->irq, iwl_pcie_isr_ict,
			  IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}