iwl-trans-pcie.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))
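
/*
 * Note (added by editor, not in the original source): IWL_MASK(lo, hi)
 * sets bits lo..hi inclusive, e.g. IWL_MASK(0, 7) == 0xFF.
 * SCD_QUEUECHAIN_SEL_ALL() then selects every scheduler queue except the
 * command queue; with the example values num_of_queues == 20 and
 * cmd_queue == 9 it evaluates to 0xFFFFF & ~0x200 == 0xFFDFF.
 */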
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
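
/*
 * Note (added by editor, not in the original source): in the hw init
 * below, the device takes the RBD ring base as a physical address shifted
 * right by 8 (so the ring must be 256-byte aligned) and the status
 * write-back address shifted right by 4 (16-byte aligned).
 * dma_zalloc_coherent() returns page-aligned memory, which satisfies both.
 */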
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
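
/*
 * Note (added by editor, not in the original source): iwl_dma_ptr is a
 * small {addr, dma, size} triple wrapping one coherent allocation. The
 * helper pair above is used for the scheduler byte-count tables
 * (trans_pcie->scd_bc_tbls) and the keep-warm buffer (trans_pcie->kw),
 * allocated in iwl_trans_tx_alloc() and released in
 * iwl_trans_pcie_tx_free() below.
 */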
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
					      GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans_pcie->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;

error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
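
/*
 * Note (added by editor, not in the original source): the BUILD_BUG_ON
 * above enforces the power-of-two invariant that lets the ring index wrap
 * with a cheap mask instead of a modulo. With TFD_QUEUE_SIZE_MAX == 256,
 * iwl_queue_inc_wrap() can reduce to
 * "next = (index + 1) & (TFD_QUEUE_SIZE_MAX - 1)", so index 255 wraps
 * back to 0.
 */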
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @trans: the transport
 *
 * Return: 0 on success, negative error code otherwise
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
			       sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT		0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	int pos;
	u16 pci_lnk_ctl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pci_dev = trans_pcie->pci_dev;

	pos = pci_pcie_cap(pci_dev);
	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}
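
/*
 * Note (added by editor, not in the original source): bits 0 and 1 of the
 * PCIe Link Control register are the standard ASPM L0s and L1 enables,
 * which is what the local PCI_CFG_LINK_CTRL_VAL_{L0S,L1}_EN defines above
 * decode in iwl_apm_config() below.
 */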
static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (cfg(trans)->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    cfg(trans)->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}

static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (cfg(trans)->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}
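
/*
 * Note (added by editor, not in the original source): the second
 * iwl_poll_bit() call above waits up to 150 ms for NIC_PREPARE_DONE to
 * *clear* -- iwl_poll_bit() succeeds once (reg & mask) == (bits & mask),
 * and here bits is the complement of the mask, so the target value is 0.
 * This is the ownership handshake referenced by the "AMT took ownership"
 * comment in iwl_trans_pcie_start_fw().
 */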
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};
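
/*
 * Note (added by editor, not in the original source): the tables above are
 * indexed by HW queue number. BSS access categories take queues 0-3; with
 * PAN enabled, PAN ACs take queues 4-7 in reverse AC order, queue 8 is the
 * PAN multicast queue (hence the extra IWL_TX_FIFO_BE_IPAN entry with a
 * literal AC of 2 == IEEE80211_AC_BE), and the command queue moves from
 * #4 to #9 -- matching the "(#4/#9)" comments in iwl_trans_tx_alloc() and
 * iwl_tx_init().
 */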
/*
 * ucode
 */
static int iwl_load_section(struct iwl_trans *trans, const char *name,
			    const struct fw_desc *image, u32 dst_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "%s uCode section being loaded...\n", name);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the %s uCode section\n",
			name);
		return -ETIMEDOUT;
	}

	return 0;
}
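
/*
 * Note (added by editor, not in the original source): iwl_load_section()
 * programs the FH "service channel" to DMA one firmware section from host
 * DRAM (phy_addr) into device SRAM at dst_addr: pause the channel, set
 * source, destination and byte count, mark the single TB valid, then
 * re-enable the channel and sleep until the end-of-TFD interrupt sets
 * ucode_write_complete (or the 5 second timeout expires).
 */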
static int iwl_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;

	ret = iwl_load_section(trans, "INST", &image->code,
			       IWLAGN_RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;

	ret = iwl_load_section(trans, "DATA", &image->data,
			       IWLAGN_RTC_DATA_LOWER_BOUND);
	if (ret)
		return ret;

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;

	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (hw_rfkill) {
		iwl_enable_rfkill_int(trans);
		return -ERFKILL;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * must be called under the irq lock and with MAC access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
static void iwl_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				cfg(trans)->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   ((SCD_WIN_SIZE <<
				     SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				   ((SCD_FRAME_LIMIT <<
				     SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, cfg(trans)->base_params->num_of_queues));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
	       sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
		     IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
		     IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}

/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id, ret;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
		if (ret < 0)
			IWL_ERR(trans, "Timed out stopping"
				" DMA channel %d [0x%08x]", ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again here.
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* wait to make sure we flush any pending tasklet */
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
}

static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
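
/*
 * Note (added by editor, not in the original source): for WoWLAN the
 * device is deliberately left running -- D3_CFG_COMPLETE hands control to
 * the ucode, and clearing MAC_ACCESS_REQ releases the host's request to
 * keep the MAC awake (the same bit cleared in iwl_trans_pcie_stop_device()
 * above), so the device can manage its own power states while the
 * platform sleeps.
 */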
  1094. static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
  1095. struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
  1096. u8 sta_id, u8 tid)
  1097. {
  1098. struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  1099. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1100. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1101. struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
  1102. struct iwl_cmd_meta *out_meta;
  1103. struct iwl_tx_queue *txq;
  1104. struct iwl_queue *q;
  1105. dma_addr_t phys_addr = 0;
  1106. dma_addr_t txcmd_phys;
  1107. dma_addr_t scratch_phys;
  1108. u16 len, firstlen, secondlen;
  1109. u8 wait_write_ptr = 0;
  1110. u8 txq_id;
  1111. bool is_agg = false;
  1112. __le16 fc = hdr->frame_control;
  1113. u8 hdr_len = ieee80211_hdrlen(fc);
  1114. u16 __maybe_unused wifi_seq;
  1115. /*
  1116. * Send this frame after DTIM -- there's a special queue
  1117. * reserved for this for contexts that support AP mode.
  1118. */
  1119. if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
  1120. txq_id = trans_pcie->mcast_queue[ctx];
  1121. /*
  1122. * The microcode will clear the more data
  1123. * bit in the last frame it transmits.
  1124. */
  1125. hdr->frame_control |=
  1126. cpu_to_le16(IEEE80211_FCTL_MOREDATA);
  1127. } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
  1128. txq_id = IWL_AUX_QUEUE;
  1129. else
  1130. txq_id =
  1131. trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
  1132. /* aggregation is on for this <sta,tid> */
  1133. if (info->flags & IEEE80211_TX_CTL_AMPDU) {
  1134. WARN_ON(tid >= IWL_MAX_TID_COUNT);
  1135. txq_id = trans_pcie->agg_txq[sta_id][tid];
  1136. is_agg = true;
  1137. }
  1138. txq = &trans_pcie->txq[txq_id];
  1139. q = &txq->q;
  1140. spin_lock(&txq->lock);
  1141. /* In AGG mode, the index in the ring must correspond to the WiFi
  1142. * sequence number. This is a HW requirements to help the SCD to parse
  1143. * the BA.
  1144. * Check here that the packets are in the right place on the ring.
  1145. */
  1146. #ifdef CONFIG_IWLWIFI_DEBUG
  1147. wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
  1148. WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
  1149. "Q: %d WiFi Seq %d tfdNum %d",
  1150. txq_id, wifi_seq, q->write_ptr);
  1151. #endif
  1152. /* Set up driver data for this TFD */
  1153. txq->skbs[q->write_ptr] = skb;
  1154. txq->cmd[q->write_ptr] = dev_cmd;
  1155. dev_cmd->hdr.cmd = REPLY_TX;
  1156. dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
  1157. INDEX_TO_SEQ(q->write_ptr)));
  1158. /* Set up first empty entry in queue's array of Tx/cmd buffers */
  1159. out_meta = &txq->meta[q->write_ptr];
  1160. /*
  1161. * Use the first empty entry in this queue's command buffer array
  1162. * to contain the Tx command and MAC header concatenated together
  1163. * (payload data will be in another buffer).
  1164. * Size of this varies, due to varying MAC header length.
  1165. * If end is not dword aligned, we'll have 2 extra bytes at the end
  1166. * of the MAC header (device reads on dword boundaries).
  1167. * We'll tell device about this padding later.
  1168. */
  1169. len = sizeof(struct iwl_tx_cmd) +
  1170. sizeof(struct iwl_cmd_header) + hdr_len;
  1171. firstlen = (len + 3) & ~3;
  1172. /* Tell NIC about any 2-byte padding after MAC header */
  1173. if (firstlen != len)
  1174. tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
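
	/*
	 * The scratch area is written back by the device, so it is handed a
	 * bus address split into a 32-bit LSB and the remaining high bits;
	 * with the 36-bit DMA mask set at probe time, iwl_get_dma_hi_addr()
	 * presumably extracts just the bits above bit 31.
	 */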
	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);
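
	/*
	 * iwl_queue_inc_wrap() advances an index around the ring; a minimal
	 * sketch, assuming n_bd is a power of two (typically 256):
	 *
	 *	return (index + 1) & (n_bd - 1);
	 *
	 * so the write pointer always names the first unused TFD.
	 */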
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually;
	 * all that is left to decide is whether the write pointer
	 * must be pushed to the device now or deferred.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
 out_err:
	spin_unlock(&txq->lock);
	return -1;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			     iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto err_free_irq;
	}

	iwl_apm_init(trans);

	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
		      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	free_irq(trans_pcie->irq, trans);
error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
{
	iwl_apm_stop(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/* Even if we stop the HW, we still want the RF kill interrupt */
	iwl_enable_rfkill_int(trans);
}
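
/*
 * Reclaim TFDs up to, but not including, the ring index that ssn maps to:
 * the firmware reports (via TX status / block-ack) the next expected
 * sequence number, so every frame before it can be freed and handed back
 * to mac80211 through @skbs.
 */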
static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
				  int txq_id, int ssn, struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;

	spin_lock(&txq->lock);

	txq->time_stamp = jiffies;

	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
		     tid != IWL_TID_NON_QOS &&
		     txq_id != trans_pcie->agg_txq[sta_id][tid])) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can happen rather often, and in order not to
		 * fill the syslog, don't use IWL_ERR or IWL_WARN.
		 */
		IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
				    "agg_txq[sta_id][tid] %d", txq_id,
				    trans_pcie->agg_txq[sta_id][tid]);
		spin_unlock(&txq->lock);
		return 1;
	}

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
				   txq_id, iwl_get_queue_ac(txq),
				   txq->q.read_ptr, tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
	return 0;
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;

	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested) {
		free_irq(trans_pcie->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	trans->shrd->trans = NULL;
	kfree(trans);
}
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
		      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);

	if (hw_rfkill)
		iwl_enable_rfkill_int(trans);
	else
		iwl_enable_interrupts(trans);

	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "failed to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
/*
 * On every watchdog tick we check the (latest) time stamp. If it does not
 * change during the timeout period and the queue is not empty, we reset
 * the firmware.
 */
static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
	struct iwl_queue *q = &txq->q;
	unsigned long timeout;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = jiffies;
		return 0;
	}

	timeout = txq->time_stamp +
		  msecs_to_jiffies(hw_params(trans).wd_timeout);

	if (time_after(jiffies, timeout)) {
		IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
			hw_params(trans).wd_timeout);
		IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);
		IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
				& (TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
		return 1;
	}

	return 0;
}
static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
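
/*
 * IWL_CMD() is the usual stringification helper, presumably something
 * along the lines of:
 *
 *	#define IWL_CMD(x)	case x: return #x
 *
 * which is what lets the switch above map a register offset to its name.
 */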
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}
static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}
void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
				       char __user *user_buf,		\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};
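
/*
 * As an illustration, DEBUGFS_READ_FILE_OPS(rx_queue) expands to a
 * forward declaration of iwl_dbgfs_rx_queue_read() plus:
 *
 *	static const struct file_operations iwl_dbgfs_rx_queue_ops = {
 *		.read = iwl_dbgfs_rx_queue_read,
 *		.open = iwl_dbgfs_open_file_generic,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * so each debugfs file below only has to supply its read/write body.
 */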
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;

	if (!trans_pcie->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u stop=%d"
				 " swq_id=%#.2x (ac %d/hwq %d)\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->swq_id, txq->swq_id & 3,
				 (txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				 " stop-count: %d\n",
				 atomic_read(&trans_pcie->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf;
	int pos = 0;
	ssize_t ret = -ENOMEM;

	ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
	}
	return ret;
}

static ssize_t iwl_dbgfs_log_event_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	u32 event_log_flag;
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%u", &event_log_flag) != 1)
		return -EFAULT;
	if (event_log_flag == 1)
		iwl_dump_nic_event_log(trans, true, NULL, false);

	return count;
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Cannot allocate buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{ return 0; }
#endif /* CONFIG_IWLWIFI_DEBUGFS */
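
/*
 * The transport is exposed to the op_mode purely as this table of
 * function pointers, so the op_mode never calls PCIe code directly;
 * that indirection is what lets an alternative transport (e.g. the
 * CONFIG_IWLWIFI_IDI build above) slot in behind the same interface.
 */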
const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.free = iwl_trans_pcie_free,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
	.check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
				       struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->shrd = shrd;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);
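
	/*
	 * Ask for a 36-bit DMA mask first (matching the addressing the
	 * device supports, cf. the scratch LSB/MSB split in the TX path)
	 * and fall back to a plain 32-bit mask rather than failing probe
	 * on platforms that cannot honour it.
	 */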
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);
	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed (0X%x)", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return trans;

out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}