/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))
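
/*
 * The mask above selects every Tx queue except the command queue.
 * For example, with num_of_queues == 20 and cmd_queue == 4 it evaluates
 * to ((1 << 20) - 1) & ~(1 << 4) == 0xFFFFF & ~0x10 == 0xFFFEF.
 */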
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
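
/*
 * Note on the shifts above: the RBD base register is written with the DMA
 * address right-shifted by 8 and the status write pointer register with it
 * right-shifted by 4, so the RBD ring must be 256-byte aligned and the
 * status buffer 16-byte aligned. The dma_zalloc_coherent() calls in
 * iwl_trans_rx_alloc() return at least page-aligned memory, which should
 * satisfy both constraints.
 */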
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
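
/*
 * Rx stop is a two-step handshake: the config write disables the DMA
 * channel, then iwl_poll_direct_bit() busy-waits (here up to 1000 usec)
 * for the channel-idle status bit, returning a negative value if the bit
 * never asserts in time; that value is propagated to the caller.
 */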
static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_tx_queue *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
		SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_read_targ_mem(trans,
					  trans_pcie->scd_base_addr +
					  SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
		struct iwl_tx_cmd *tx_cmd =
			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			get_unaligned_le32(&tx_cmd->scratch));
	}

	iwl_op_mode_nic_error(trans->op_mode);
}
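
/*
 * How the stuck-queue watchdog fits together: iwl_trans_pcie_tx() arms
 * txq->stuck_timer with mod_timer() when it places a frame on an empty
 * queue, and the Tx reclaim path is expected to re-arm or cancel it as
 * the queue drains. If wd_timeout (set in iwl_trans_pcie_configure())
 * elapses with the queue still non-empty, the handler above dumps the
 * scheduler and FH state and reports a NIC error to the op_mode.
 */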
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_tx_queue_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
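
/*
 * The BUILD_BUG_ON above uses the usual power-of-two trick: n & (n - 1)
 * is zero exactly when n is a power of two. The wrap helpers can then
 * advance an index with a mask instead of a modulo, effectively
 * next = (index + 1) & (n_bd - 1), so with n_bd == 256 index 255 wraps
 * back to 0.
 */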
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->entries[i].cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
/**
 * iwl_trans_tx_alloc - allocate TX context
 * @trans: the transport to allocate for
 *
 * Allocate all Tx DMA structures and initialize them.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}
static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT		0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 pci_lnk_ctl;

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
				  &pci_lnk_ctl);
	return pci_lnk_ctl;
}
static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
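
/*
 * Note the inverted sense of CSR_GIO_REG_VAL_L0S_ENABLED above: despite
 * its name, setting the bit disables L0S, hence the "(!)" remarks in the
 * comments. pm_support ends up true only when the link control register
 * reports L0S off.
 */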
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}
/*
 * ucode
 */
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = section->p_addr;
	u32 byte_cnt = section->len;
	u32 dst_addr = section->offset;
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
			section_num);
		return -ETIMEDOUT;
	}

	return 0;
}
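
/*
 * The register sequence above programs the FH service channel to DMA the
 * firmware section from host memory (phy_addr) into device SRAM at
 * dst_addr. Completion is interrupt driven: the ISR sets
 * ucode_write_complete and wakes ucode_write_waitq, and
 * wait_event_timeout() returns 0 only if 5 seconds pass without that
 * happening.
 */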
static int iwl_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;
	int i;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].p_addr)
			break;

		ret = iwl_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
static void iwl_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	int chan;
	u32 reg_val;

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
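
/*
 * SCD_DRAM_BASE_ADDR is written with the byte-count table DMA address
 * shifted right by 10, so the scheduler expects that buffer to be
 * 1024-byte aligned; the dma_alloc_coherent() allocation behind
 * iwlagn_alloc_dma_ptr() provides at least page alignment, which should
 * cover this.
 */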
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}
/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans,
			 "Stopping tx queues that aren't allocated...\n");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
		  ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));
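
	/*
	 * The sequence field packs the Tx queue id and the write index into
	 * one 16-bit value (via QUEUE_TO_SEQ/INDEX_TO_SEQ), so the Tx
	 * response from the firmware can be matched back to the exact ring
	 * entry it completes.
	 */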
	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
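
	/*
	 * (len + 3) & ~3 rounds len up to the next multiple of 4: e.g.
	 * len == 58 gives firstlen == 60, i.e. 2 bytes of padding that the
	 * TX_CMD_FLG_MH_PAD_MSK flag tells the device to skip.
	 */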
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			     iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		goto err_free_irq;
	}

	iwl_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	free_irq(trans_pcie->irq, trans);
error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	iwl_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;

	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}

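/* Thin MMIO accessors over the BAR0 mapping set up in iwl_trans_pcie_alloc() */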
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

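/*
 * iwl_trans_pcie_configure - apply the op_mode's transport configuration
 *
 * A hypothetical caller-side sketch (values illustrative only, showing
 * the fields consumed below; queue_watchdog_timeout is in ms):
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.cmd_queue = 9,
 *		.cmd_fifo = 7,
 *		.rx_buf_size_8k = false,
 *		.queue_watchdog_timeout = 2000,
 *	};
 */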
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}

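/*
 * iwl_trans_pcie_free - undo everything iwl_trans_pcie_alloc() and
 * start_hw set up: TX/RX resources, the IRQ and ICT table (if they were
 * requested), MSI, the BAR mapping, the PCI regions/device and the
 * command pool.
 */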
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested) {
		free_irq(trans_pcie->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

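/*
 * System PM hooks. Suspend is a no-op here; on resume the RF kill
 * interrupt is re-armed, the current switch state is reported to the
 * op_mode, and interrupts are re-enabled only when the radio is on.
 */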
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

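/*
 * Poll every TX queue except the command queue until it drains, giving
 * each queue up to IWL_FLUSH_WAIT_MS before declaring a flush timeout.
 */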
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

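/*
 * Register-dump helpers: get_fh_string()/get_csr_string() map register
 * offsets back to their names, and iwl_dump_fh()/iwl_dump_csr() print a
 * fixed table of registers either to the log or, for iwl_dump_fh() with
 * debugfs enabled, into a buffer the caller must free.
 */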
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* debugfs file creation */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
				       char __user *user_buf,		\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

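/*
 * For reference, DEBUGFS_READ_FILE_OPS(rx_queue) above expands to a
 * forward declaration of iwl_dbgfs_rx_queue_read() plus:
 *
 *	static const struct file_operations iwl_dbgfs_rx_queue_ops = {
 *		.read = iwl_dbgfs_rx_queue_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 */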
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

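/*
 * Writing any value to this file triggers a full CSR dump to the log;
 * the parsed value itself is currently unused.
 */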
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();

	return count;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

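/*
 * The transport API vtable: this binds the generic iwl_trans layer to
 * the PCIe implementations above.
 */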
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};

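/*
 * iwl_trans_pcie_alloc - set up the PCI device and the transport around it
 *
 * Enables the device, negotiates a DMA mask (36-bit, falling back to
 * 32-bit), maps BAR0, enables MSI and creates the per-device command
 * pool. A hypothetical probe-side sketch of the intended use (cfg would
 * come from the matched device table; error unwinding omitted):
 *
 *	static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 *	{
 *		struct iwl_trans *trans;
 *
 *		trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
 *		if (!trans)
 *			return -ENOMEM;
 *		...
 *	}
 *
 * Returns NULL on any failure, after unwinding whatever was set up.
 */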
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)\n", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	return trans;

out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}