trans.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "iwl-commands.h"

#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))
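
/*
 * Worked example of the mask above (illustrative values, not taken from
 * this file): with num_of_queues = 20 and cmd_queue = 4, the macro
 * evaluates to ((1 << 20) - 1) & ~(1 << 4)
 *            = 0x000fffff & 0xffffffef = 0x000fffef,
 * i.e. queue chaining is selected on every queue except the command queue.
 */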
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);
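
	/*
	 * Note (an assumption, not stated in this file): the two writes
	 * above pass shifted DMA addresses, i.e. the registers appear to
	 * hold the base in 256-byte (>> 8) and 16-byte (>> 4) units, so
	 * the buffers must be aligned accordingly; the page-aligned
	 * allocations from dma_zalloc_coherent() satisfy both.
	 */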
	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_tx_queue *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);
	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
			& (TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));

	iwl_op_mode_nic_error(trans->op_mode);
}
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_tx_queue_entry),
			       GFP_KERNEL);
	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;

error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
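
	/*
	 * The check works because a power of two has a single set bit,
	 * so N & (N - 1) == 0 exactly when N is a power of two: e.g.
	 * 256 & 255 == 0 passes, while 96 & 95 == 64 would break the
	 * build at compile time.
	 */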
	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		iwl_txq_free_tfd(trans, txq, dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->entries[i].cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param trans
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}
static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT		0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int pos;
	u16 pci_lnk_ctl;
	struct pci_dev *pci_dev = trans_pcie->pci_dev;

	pos = pci_pcie_cap(pci_dev);
	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}
static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
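
	/*
	 * Note on the poll above (assuming the usual iwl_poll_bit()
	 * semantics of waiting until (reg & mask) == (bits & mask)):
	 * passing ~PREPARE_DONE as "bits" with PREPARE_DONE as the mask
	 * makes the expected value 0, i.e. we wait up to 150 ms for the
	 * NIC_PREPARE_DONE bit to clear.
	 */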
	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}
/*
 * ucode
 */
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = section->p_addr;
	u32 byte_cnt = section->len;
	u32 dst_addr = section->offset;
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
			section_num);
		return -ETIMEDOUT;
	}

	return 0;
}
static int iwl_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;
	int i;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].p_addr)
			break;

		ret = iwl_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under the irq lock and with MAC access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
static void iwl_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   ((SCD_WIN_SIZE <<
				     SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				   ((SCD_FRAME_LIMIT <<
				     SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, trans->cfg->base_params->num_of_queues));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
		int fifo = trans_pcie->setup_q_to_fifo[i];

		set_bit(i, trans_pcie->queue_used);

		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, true);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}
/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
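
	/*
	 * Example of the rounding above (values illustrative only): with
	 * a QoS data header, hdr_len = 26, so if the two fixed structs
	 * add up to 36 bytes, len = 62 and firstlen = (62 + 3) & ~3 = 64;
	 * firstlen != len, so the two pad bytes get flagged to the device.
	 */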
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			     iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto err_free_irq;
	}

	iwl_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	free_irq(trans_pcie->irq, trans);
error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	iwl_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
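	/*
	 * E.g. with n_bd = 256, ssn = 300 maps to TFD index
	 * 300 & 0xff = 44: the masking simply wraps the sequence-number
	 * space onto the ring, which works because n_bd is a power of
	 * two (enforced by the BUILD_BUG_ON in iwl_trans_txq_init()).
	 */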
  1211. int freed = 0;
  1212. spin_lock(&txq->lock);
  1213. if (txq->q.read_ptr != tfd_num) {
  1214. IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
  1215. txq_id, txq->q.read_ptr, tfd_num, ssn);
  1216. freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
  1217. if (iwl_queue_space(&txq->q) > txq->q.low_mark)
  1218. iwl_wake_queue(trans, txq);
  1219. }
  1220. spin_unlock(&txq->lock);
  1221. }
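
/* Direct MMIO accessors for the device's BAR0 register window */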
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
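
/*
 * iwl_trans_pcie_configure - apply the op_mode's transport configuration
 *
 * Copies the command queue number, the no-reclaim command list, the
 * queue-to-FIFO mapping, the RX buffer size, the queue watchdog timeout
 * and the command-name table from @trans_cfg into the PCIe transport.
 */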
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;

	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
	/* at least the command queue must be mapped */
	WARN_ON(!trans_pcie->n_q_to_fifo);

	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
	       trans_pcie->n_q_to_fifo * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}
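
/*
 * iwl_trans_pcie_free - tear down the transport
 *
 * Releases everything iwl_trans_pcie_alloc() and the start paths set
 * up: TX/RX resources, the IRQ and ICT table (if requested), MSI, the
 * BAR mapping, the PCI regions and finally the trans structure itself.
 */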
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested) {
		free_irq(trans_pcie->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (!hw_rfkill)
		iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
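
/*
 * iwl_trans_pcie_wait_tx_queue_empty - wait for all TX queues to drain
 *
 * Polls every data queue (the command queue is skipped) for up to
 * IWL_FLUSH_WAIT_MS until its read and write pointers meet. Returns 0
 * on success or -ETIMEDOUT if any queue failed to drain in time.
 */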
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
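
/*
 * iwl_dump_fh - dump the flow handler (DMA) registers
 *
 * With CONFIG_IWLWIFI_DEBUG and @display set, the dump is formatted
 * into a kmalloc'ed buffer returned through @buf (the caller must
 * kfree() it) and the number of bytes written is returned. Otherwise
 * the registers are written to the error log and 0 is returned.
 */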
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
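
/* iwl_dump_csr - dump the control/status registers to the error log */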
void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
				       char __user *user_buf,		\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
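
/*
 * debugfs read/write handlers; each file's private_data is the
 * iwl_trans the directory was registered for.
 */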
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);
	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);
	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);
	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	/* iwl_dump_fh() only sets this in the debug/display case */
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	iwl_op_mode_nic_error(trans->op_mode);

	return count;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
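
/*
 * trans_ops_pcie - the PCIe implementation of the transport API,
 * handed to the op_mode through the iwl_trans allocated below.
 */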
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};
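
/*
 * iwl_trans_pcie_alloc - allocate and set up the PCIe transport
 *
 * Called from the PCI probe path. A minimal caller sketch (hypothetical,
 * error handling elided; the out_free_drv label is an assumption, not
 * part of this file):
 *
 *	trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
 *	if (!trans)
 *		goto out_free_drv;
 *
 * On any failure the PCI resources acquired so far are released and
 * NULL is returned.
 */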
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);
	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	return trans;

out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}