iwl-trans-pcie.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1 << trans->cfg->base_params->num_of_queues) - 1) &	\
	 (~(1 << (trans_pcie)->cmd_queue)))
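
/*
 * For example, IWL_MASK(0, 7) == (1 << 7) | ((1 << 7) - (1 << 0)) == 0xff:
 * the macro sets bits lo..hi inclusive. Likewise SCD_QUEUECHAIN_SEL_ALL()
 * builds a mask with one bit per HW queue and then clears the command
 * queue's bit, so queue chaining ends up enabled on every data queue but
 * not on the command queue.
 */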
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *   the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
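
/*
 * Note on the two address writes above: the RBD base is programmed as
 * bd_dma >> 8 and the status block as rb_stts_dma >> 4, so the hardware
 * only sees addresses whose low 8 (respectively 4) bits are zero, i.e.
 * the RBD ring must be 256-byte aligned and the status block 16-byte
 * aligned. The coherent DMA allocations made in iwl_trans_rx_alloc()
 * satisfy this.
 */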
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
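
/*
 * Typical usage of the pair above, as seen later in this file for the
 * keep-warm buffer and the scheduler byte-count tables:
 *
 *	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
 *	if (ret)
 *		goto error;
 *	...
 *	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
 *
 * The WARN_ON(ptr->addr) on alloc and the memset() on free make a
 * double-alloc or double-free show up loudly instead of silently
 * leaking or corrupting the mapping.
 */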
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_tx_queue *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);
	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id)) &
			(TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));

	iwl_op_mode_nic_error(trans->op_mode);
}
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_tx_queue_entry),
			       GFP_KERNEL);
	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;

error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
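
/*
 * Why the BUILD_BUG_ON above: with a power-of-two ring size the wrap
 * helpers can advance an index with a mask instead of a modulo. A
 * minimal sketch, assuming the usual implementation in
 * iwl-trans-pcie-int.h:
 *
 *	index = (index + 1) & (n_bd - 1);
 *
 * With TFD_QUEUE_SIZE_MAX = 256 this maps 255 back to 0; for a
 * non-power-of-two size the mask would produce wrong indices.
 */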
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->entries[i].cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param trans
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			       sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}
static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT		0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	int pos;
	u16 pci_lnk_ctl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pci_dev = trans_pcie->pci_dev;

	pos = pci_pcie_cap(pci_dev);
	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}
static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
	    PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
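
/*
 * In short, the bring-up order above is: chicken-bit workarounds, ASPM
 * configuration, optional PLL setup, INIT_DONE (D0U* -> D0A*), poll for
 * MAC clock ready, enable the DMA clock, then disable L1-Active.
 * iwl_apm_stop() below undoes this in roughly reverse order.
 */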
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}
/*
 * ucode
 */
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = section->p_addr;
	u32 byte_cnt = section->len;
	u32 dst_addr = section->offset;
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
			section_num);
		return -ETIMEDOUT;
	}

	return 0;
}
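
/*
 * The register sequence above programs the FH "service channel" for a
 * one-shot DMA: pause the channel, point it at the SRAM destination and
 * at the DRAM source (low bits and high bits of the DMA address written
 * separately), mark the single TFD buffer valid, re-enable the channel,
 * and then sleep until the write-complete interrupt wakes
 * ucode_write_waitq or the 5 second timeout expires.
 */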
static int iwl_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;
	int i;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].p_addr)
			break;

		ret = iwl_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
		      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (hw_rfkill) {
		iwl_enable_rfkill_int(trans);
		return -ERFKILL;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under the irq lock and with MAC access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
static void iwl_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   ((SCD_WIN_SIZE <<
				     SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				   ((SCD_FRAME_LIMIT <<
				     SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, trans->cfg->base_params->num_of_queues));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
		int fifo = trans_pcie->setup_q_to_fifo[i];

		set_bit(i, trans_pcie->queue_used);

		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, true);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}
/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id, ret;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans, "Failing on timeout while stopping"
				" DMA channel %d [0x%08x]", ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again here.
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
	      sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;
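	/*
	 * (len + 3) & ~3 rounds len up to the next multiple of 4, e.g.
	 * len = 41 -> firstlen = 44 while len = 44 stays 44; the device
	 * reads on dword boundaries, so when firstlen != len the MH_PAD
	 * flag below tells it about the padding bytes.
	 */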
	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			     iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto err_free_irq;
	}

	iwl_apm_init(trans);

	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
		      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	free_irq(trans_pcie->irq, trans);
error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
{
	iwl_apm_stop(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/* Even if we stop the HW, we still want the RF kill interrupt */
	iwl_enable_rfkill_int(trans);
}
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;

	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}
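
/*
 * For example, with n_bd = 256 the mask is 0xff, so a 12-bit 802.11
 * starting sequence number such as ssn = 0x123 maps to TFD ring index
 * 0x23; reclaim then frees every entry from read_ptr up to, but not
 * including, that index.
 */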
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;

	/* at least the command queue must be mapped */
	WARN_ON(!trans_pcie->n_q_to_fifo);

	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
	       trans_pcie->n_q_to_fifo * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}
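
/*
 * iwl_trans_pcie_free - tear down the transport
 *
 * Releases the TX/RX resources, the IRQ and ICT table if they were
 * requested, and all PCI resources, then frees the trans itself.
 */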
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested) {
		free_irq(trans_pcie->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
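
/*
 * PM callbacks: suspend is a no-op; on resume, re-read the RF-kill state,
 * re-enable the appropriate interrupts and report the state to the op_mode.
 */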
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
		      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);

	if (hw_rfkill)
		iwl_enable_rfkill_int(trans);
	else
		iwl_enable_interrupts(trans);

	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
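
/*
 * iwl_trans_pcie_wait_tx_queue_empty - poll all data queues until drained
 *
 * Busy-waits (1 ms sleeps) for each queue's read pointer to catch up with
 * its write pointer, up to IWL_FLUSH_WAIT_MS; the command queue is skipped.
 * Returns -ETIMEDOUT if any queue fails to drain in time.
 */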
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "failed to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
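
/*
 * iwl_dump_fh - dump the flow handler (DMA) registers
 *
 * With CONFIG_IWLWIFI_DEBUG and @display set, the dump is formatted into a
 * kmalloc'ed buffer returned through @buf (the caller frees it) and the
 * number of bytes written is returned; otherwise the registers are printed
 * to the error log.
 */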
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
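
/* Dump the most useful CSR registers to the error log */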
void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
				       char __user *user_buf,		\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
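
/*
 * Illustration (not part of the driver): DEBUGFS_READ_WRITE_FILE_OPS(interrupt)
 * expands to roughly
 *
 *	static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
 *						char __user *user_buf,
 *						size_t count, loff_t *ppos);
 *	static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
 *						 const char __user *user_buf,
 *						 size_t count, loff_t *ppos);
 *	static const struct file_operations iwl_dbgfs_interrupt_ops = {
 *		.write = iwl_dbgfs_interrupt_write,
 *		.read = iwl_dbgfs_interrupt_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * i.e. the forward declarations plus the file_operations that
 * DEBUGFS_ADD_FILE(interrupt, ...) later wires into debugfs.
 */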
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Cannot allocate buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");
	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);
	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);
	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);
	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
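
/*
 * Writing any number to this file currently just dumps all CSR registers;
 * the parsed value is validated but not otherwise used.
 */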
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;	/* iwl_dump_fh() may fail before setting it */
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}
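
/* Writing here asks the op_mode to simulate a firmware error and restart */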
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	iwl_op_mode_nic_error(trans->op_mode);

	return count;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
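
/* The transport ops vtable handed to the iwlwifi core for PCIe devices */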
const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.free = iwl_trans_pcie_free,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};
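
/*
 * iwl_trans_pcie_alloc - allocate and set up the PCIe transport
 *
 * Enables the PCI device, sets the DMA mask (36-bit, falling back to
 * 32-bit), maps BAR0, enables MSI and records the HW revision and ID.
 * Returns NULL on any failure, unwinding whatever was already set up.
 */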
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed (0x%x)\n", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return trans;

out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}