iwl-trans-pcie.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"

/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "iwl-commands.h"

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	 (~(1<<(trans_pcie)->cmd_queue)))
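
/**
 * iwl_trans_rx_alloc - allocate the Rx queue DMA resources
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * Rx status area, both as coherent DMA memory shared with the device.
 * The Rx buffer pages themselves are allocated later, by
 * iwlagn_rx_replenish() from iwl_rx_init().
 */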
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
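
/**
 * iwl_trans_rxq_free_rx_bufs - release the pages in the Rx buffer pool
 *
 * Unmaps and frees any page still attached to a pool entry, then puts
 * every entry back on the rx_used list so the pool can be replenished.
 */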
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
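
/**
 * iwl_trans_rx_hw_init - program the Rx DMA channel registers
 *
 * Points the device at the RBD circular buffer and the Rx status area in
 * DRAM, then enables Rx DMA with the configured buffer size, RB timeout
 * and number of RBDs.
 */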
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *	the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
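
/**
 * iwl_trans_pcie_queue_stuck_timer - per-queue watchdog
 *
 * Fires when a non-empty Tx queue has not advanced for wd_timeout jiffies.
 * Dumps the SW and HW read/write pointers and reports a NIC error to the
 * op_mode, which is then responsible for recovery.
 */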
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_tx_queue *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);
	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
			& (TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));

	iwl_op_mode_nic_error(trans->op_mode);
}
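
/**
 * iwl_trans_txq_alloc - allocate one Tx queue's host-side structures
 *
 * Allocates the per-entry bookkeeping array (plus preallocated command
 * buffers when this is the command queue) and the circular buffer of TFDs
 * shared with the device, and sets up the queue's stuck timer.
 */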
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_tx_queue_entry),
			       GFP_KERNEL);
	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwlagn_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->entries[i].cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @trans: the transport
 *
 * Returns 0 on success, or a negative error code.
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT		0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	int pos;
	u16 pci_lnk_ctl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pci_dev = trans_pcie->pci_dev;

	pos = pci_pcie_cap(pci_dev);
	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}

static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
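
/**
 * iwl_apm_stop_master - stop the device's bus-master DMA activity
 *
 * Sets the STOP_MASTER bit and polls until the device reports the master
 * is disabled, warning if the 100 usec timeout expires first.
 */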
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}
/*
 * ucode
 */
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = section->p_addr;
	u32 byte_cnt = section->len;
	u32 dst_addr = section->offset;
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
			section_num);
		return -ETIMEDOUT;
	}

	return 0;
}
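
/**
 * iwl_load_given_ucode - load all sections of a firmware image
 *
 * Feeds each present section to iwl_load_section() in turn via the
 * service DMA channel, then clears CSR_RESET so the NIC can operate.
 */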
static int iwl_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;
	int i;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].p_addr)
			break;

		ret = iwl_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * must be called under the irq lock and with MAC access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
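
/**
 * iwl_tx_start - program the scheduler and Tx DMA after firmware load
 *
 * Clears the scheduler's context and Tx status areas in SRAM, points it
 * at the byte-count tables, enables the FH Tx DMA channels, initializes
 * each queue's pointers and window/frame limits, and activates the
 * configured queue-to-FIFO mappings.
 */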
static void iwl_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   ((SCD_WIN_SIZE <<
				     SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				   ((SCD_FRAME_LIMIT <<
				     SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, trans->cfg->base_params->num_of_queues));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
		int fifo = trans_pcie->setup_q_to_fifo[i];

		set_bit(i, trans_pcie->queue_used);

		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, true);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id, ret;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				1000);
		if (ret < 0)
			IWL_ERR(trans, "Failing on timeout while stopping"
				" DMA channel %d [0x%08x]", ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
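
/**
 * iwl_trans_pcie_tx - queue an skb for transmission
 *
 * Builds the TFD for this frame: the Tx command + MAC header is mapped as
 * the first TB (bidirectionally, since the device writes the scratch area
 * back), the remainder of the skb, if any, as the second TB. The byte-count
 * table is updated and the queue write pointer advanced so the device sees
 * the new TFD.
 */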
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}
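
/**
 * iwl_trans_pcie_start_hw - prepare the hardware and claim the interrupt
 *
 * On first use this also sets up the ICT interrupt table, the interrupt
 * tasklet and the Rx replenish work; it then brings up the APM and tells
 * the op_mode about the current RF kill state.
 */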
  1148. static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
  1149. {
  1150. struct iwl_trans_pcie *trans_pcie =
  1151. IWL_TRANS_GET_PCIE_TRANS(trans);
  1152. int err;
  1153. bool hw_rfkill;
  1154. trans_pcie->inta_mask = CSR_INI_SET_MASK;
  1155. if (!trans_pcie->irq_requested) {
  1156. tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
  1157. iwl_irq_tasklet, (unsigned long)trans);
  1158. iwl_alloc_isr_ict(trans);
  1159. err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
  1160. DRV_NAME, trans);
  1161. if (err) {
  1162. IWL_ERR(trans, "Error allocating IRQ %d\n",
  1163. trans_pcie->irq);
  1164. goto error;
  1165. }
  1166. INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
  1167. trans_pcie->irq_requested = true;
  1168. }
  1169. err = iwl_prepare_card_hw(trans);
  1170. if (err) {
  1171. IWL_ERR(trans, "Error while preparing HW: %d", err);
  1172. goto err_free_irq;
  1173. }
  1174. iwl_apm_init(trans);
  1175. /* From now on, the op_mode will be kept updated about RF kill state */
  1176. iwl_enable_rfkill_int(trans);
  1177. hw_rfkill = iwl_is_rfkill_set(trans);
  1178. iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
  1179. return err;
  1180. err_free_irq:
  1181. free_irq(trans_pcie->irq, trans);
  1182. error:
  1183. iwl_free_isr_ict(trans);
  1184. tasklet_kill(&trans_pcie->irq_tasklet);
  1185. return err;
  1186. }
  1187. static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
  1188. bool op_mode_leaving)
  1189. {
  1190. bool hw_rfkill;
  1191. unsigned long flags;
  1192. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  1193. iwl_apm_stop(trans);
  1194. spin_lock_irqsave(&trans_pcie->irq_lock, flags);
  1195. iwl_disable_interrupts(trans);
  1196. spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
  1197. iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
  1198. if (!op_mode_leaving) {
  1199. /*
  1200. * Even if we stop the HW, we still want the RF kill
  1201. * interrupt
  1202. */
  1203. iwl_enable_rfkill_int(trans);
  1204. /*
  1205. * Check again since the RF kill state may have changed while
  1206. * all the interrupts were disabled, in this case we couldn't
  1207. * receive the RF kill interrupt and update the state in the
  1208. * op_mode.
  1209. */
  1210. hw_rfkill = iwl_is_rfkill_set(trans);
  1211. iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
  1212. }
  1213. }
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;

	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}

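/* Thin MMIO accessors over the BAR0 mapping established in _alloc() */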
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

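/*
 * iwl_trans_pcie_configure - copy the op_mode supplied configuration
 * (command queue, no-reclaim commands, queue-to-FIFO map, RX buffer
 * size and watchdog timeout) into the transport's private struct.
 */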
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;

	/* at least the command queue must be mapped */
	WARN_ON(!trans_pcie->n_q_to_fifo);

	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
	       trans_pcie->n_q_to_fifo * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}

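/* Undo everything done in iwl_trans_pcie_alloc() and _start_hw() */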
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested) {
		free_irq(trans_pcie->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

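/* Poll every TX queue (except the command queue) until it drains, giving
 * each one at most IWL_FLUSH_WAIT_MS before declaring a timeout. */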
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "failed to flush all tx FIFO queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

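/*
 * iwl_dump_fh - dump the flow handler (DMA) registers
 *
 * With CONFIG_IWLWIFI_DEBUG and @display set, the dump is written into a
 * freshly kmalloc'ed buffer returned through @buf (caller must kfree it)
 * and the number of bytes written is returned; otherwise the values go
 * to the error log and 0 is returned.
 */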
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

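/* Dump the control and status registers to the error log */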
void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {		\
	if (!debugfs_create_file(#name, mode, parent, trans,	\
				 &iwl_dbgfs_##name##_ops))	\
		return -ENOMEM;					\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,	\
				       char __user *user_buf,	\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)				\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,	\
					const char __user *user_buf, \
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);				\
static const struct file_operations iwl_dbgfs_##name##_ops = {	\
	.read = iwl_dbgfs_##name##_read,			\
	.open = simple_open,					\
	.llseek = generic_file_llseek,				\
};

#define DEBUGFS_WRITE_FILE_OPS(name)				\
	DEBUGFS_WRITE_FUNC(name);				\
static const struct file_operations iwl_dbgfs_##name##_ops = {	\
	.write = iwl_dbgfs_##name##_write,			\
	.open = simple_open,					\
	.llseek = generic_file_llseek,				\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)			\
	DEBUGFS_READ_FUNC(name);				\
	DEBUGFS_WRITE_FUNC(name);				\
static const struct file_operations iwl_dbgfs_##name##_ops = {	\
	.write = iwl_dbgfs_##name##_write,			\
	.read = iwl_dbgfs_##name##_read,			\
	.open = simple_open,					\
	.llseek = generic_file_llseek,				\
};

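/* debugfs tx_queue: one line per HW queue with its read/write pointers
 * and whether the queue is currently used and/or stopped. */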
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n",
			 isr_stats->rfkill);
	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);
	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);
	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);
	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

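/* Writing "0" (hex) to the interrupt file clears the accumulated ISR
 * statistics; any other value leaves them untouched. */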
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL; /* must start NULL: iwl_dump_fh() may not set it */
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	iwl_op_mode_nic_error(trans->op_mode);

	return count;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

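/* Hook the PCIe entry points up to the generic iwl_trans ops vtable;
 * op_mode code dispatches through trans->ops rather than calling the
 * PCIe functions directly. */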
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};

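/*
 * iwl_trans_pcie_alloc - allocate and wire up a PCIe transport
 *
 * Allocates iwl_trans plus its PCIe private area in one kzalloc, enables
 * the PCI device, sets the DMA masks (36-bit, falling back to 32-bit),
 * maps BAR0 and enables MSI. Returns NULL on any failure, with everything
 * acquired so far released via the out_* labels.
 */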
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed (0x%x)\n", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	return trans;

out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}