iwl-trans.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes when the transport layer tx_free is here */
#include "iwl-agn.h"
#include "iwl-shared.h"

static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        struct device *dev = bus(trans)->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                     &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;
        memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
                                          &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}

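/*
 * Note on alignment: iwl_trans_rx_hw_init() below hands the device
 * bd_dma >> 8 and rb_stts_dma >> 4, i.e. it assumes 256-byte and 16-byte
 * aligned buffers respectively. dma_alloc_coherent() returns at least
 * page-aligned memory, so both requirements are met without an explicit
 * alignment request.
 */
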
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        int i;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].page != NULL) {
                        dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
                                       PAGE_SIZE << hw_params(trans).rx_page_order,
                                       DMA_FROM_DEVICE);
                        __iwl_free_pages(priv(trans), rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
}

static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
                                 struct iwl_rx_queue *rxq)
{
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
        u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

        if (iwlagn_mod_params.amsdu_size_8K)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
         * the credit mechanism of the 5000-series HW RX FIFO
         * Direct rx interrupts to host
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
                           rb_size |
                           (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static int iwl_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_trans_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        iwl_trans_rxq_free_rx_bufs(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwlagn_rx_replenish(trans);

        iwl_trans_rx_hw_init(priv(trans), rxq);

        spin_lock_irqsave(&trans->shrd->lock, flags);
        rxq->need_update = 1;
        iwl_rx_queue_update_write_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans->shrd->lock, flags);

        return 0;
}

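/*
 * Rx bring-up order used above: allocate the queue on first use, recycle
 * every pooled buffer onto rx_used under the queue lock, replenish fresh
 * pages, program the hardware, and only then publish the write pointer.
 * Since allocation is skipped when rxq->bd already exists, iwl_rx_init()
 * is safe to call again on a restart.
 */
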
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        unsigned long flags;

        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_trans_rxq_free_rx_bufs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(bus(trans)->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
        memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
        rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
        /* stop Rx DMA */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
                                       struct iwl_dma_ptr *ptr, size_t size)
{
        if (WARN_ON(ptr->addr))
                return -EINVAL;

        ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
                                       &ptr->dma, GFP_KERNEL);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
                                       struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}

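/*
 * Typical pairing of the two helpers above (a sketch, not taken from a
 * caller in this file verbatim):
 *
 *	struct iwl_dma_ptr kw = {};
 *
 *	if (iwlagn_alloc_dma_ptr(priv, &kw, IWL_KW_SIZE))
 *		return -ENOMEM;
 *	...
 *	iwlagn_free_dma_ptr(priv, &kw);
 *
 * The WARN_ON(ptr->addr) guard makes double allocation loud, and free
 * is a no-op on a never-allocated pointer, so teardown paths can call
 * it unconditionally.
 */
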
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                               int slots_num, u32 txq_id)
{
        size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
        int i;

        if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
                return -EINVAL;

        txq->q.n_window = slots_num;

        txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num, GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num, GFP_KERNEL);
        if (!txq->meta || !txq->cmd)
                goto error;

        for (i = 0; i < slots_num; i++) {
                txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
                                      GFP_KERNEL);
                if (!txq->cmd[i])
                        goto error;
        }

        /* Alloc driver data array and TFD circular buffer */
        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (txq_id != priv->shrd->cmd_queue) {
                txq->txb = kzalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kzalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz,
                                       &txq->q.dma_addr, GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = txq_id;

        return 0;

error:
        kfree(txq->txb);
        txq->txb = NULL;
        /* since txq->cmd has been zeroed,
         * all non-allocated cmd[i] will be NULL */
        if (txq->cmd)
                for (i = 0; i < slots_num; i++)
                        kfree(txq->cmd[i]);
        kfree(txq->meta);
        kfree(txq->cmd);
        txq->meta = NULL;
        txq->cmd = NULL;

        return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                              int slots_num, u32 txq_id)
{
        int ret;

        txq->need_update = 0;
        memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                iwl_set_swq_id(txq, txq_id, txq_id);

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
                             txq_id);
        if (ret)
                return ret;

        /*
         * Tell nic where to find circular buffer of Tx Frame Descriptors for
         * given Tx queue, and enable the DMA channel used for that queue.
         * Circular buffer (TFD queue in DRAM) physical base address */
        iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                           txq->q.dma_addr >> 8);

        return 0;
}

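/*
 * With a power-of-two queue size, the wrap helpers can use a mask
 * instead of a modulo: (index + 1) & (TFD_QUEUE_SIZE_MAX - 1) takes 255
 * back to 0 for a 256-entry queue. The BUILD_BUG_ON above is what
 * guarantees the mask form is valid.
 */
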
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (!q->n_bd)
                return;

        while (q->write_ptr != q->read_ptr) {
                /* The read_ptr needs to be bounded by q->n_window */
                iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = priv->bus->dev;
        int i;

        if (WARN_ON(!txq))
                return;

        iwl_tx_queue_unmap(priv, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < txq->q.n_window; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd) {
                dma_free_coherent(dev, hw_params(priv).tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
                memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
        }

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        if (priv->txq) {
                for (txq_id = 0;
                     txq_id < hw_params(priv).max_txq_num; txq_id++)
                        iwl_tx_queue_free(priv, txq_id);
        }

        kfree(priv->txq);
        priv->txq = NULL;

        iwlagn_free_dma_ptr(priv, &priv->kw);

        iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;

        /* It is not allowed to alloc twice, so warn when this happens.
         * We cannot rely on the previous allocation, so free and fail */
        if (WARN_ON(priv->txq)) {
                ret = -EINVAL;
                goto error;
        }

        ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
                                   hw_params(priv).scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
                goto error;
        }

        /* Alloc keep-warm buffer */
        ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(priv, "Keep Warm allocation failed\n");
                goto error;
        }

        priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
                            priv->cfg->base_params->num_of_queues, GFP_KERNEL);
        if (!priv->txq) {
                IWL_ERR(priv, "Not enough memory for txq\n");
                ret = -ENOMEM;
                goto error;
        }

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
                slots_num = (txq_id == priv->shrd->cmd_queue) ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
                                          txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
                        goto error;
                }
        }

        return 0;

error:
        iwl_trans_tx_free(trans(priv));

        return ret;
}

static int iwl_tx_init(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;
        unsigned long flags;
        bool alloc = false;

        if (!priv->txq) {
                ret = iwl_trans_tx_alloc(priv);
                if (ret)
                        goto error;
                alloc = true;
        }

        spin_lock_irqsave(&priv->shrd->lock, flags);

        /* Turn off all Tx DMA fifos */
        iwl_write_prph(priv, SCD_TXFACT, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
                slots_num = (txq_id == priv->shrd->cmd_queue) ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
                                         txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return 0;

error:
        /* Upon error, free only if we allocated something */
        if (alloc)
                iwl_trans_tx_free(trans(priv));
        return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
        /*
         * (for documentation purposes)
         * to set power to V_AUX, do:
         *
         *	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
         *		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
         *				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
         *				       ~APMG_PS_CTRL_MSK_PWR_SRC);
         */

        iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
                               APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                               ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_priv *priv)
{
        unsigned long flags;

        /* nic_init */
        spin_lock_irqsave(&priv->shrd->lock, flags);
        iwl_apm_init(priv);

        /* Set interrupt coalescing calibration timer to default (512 usecs) */
        iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        iwl_set_pwr_vmain(priv);

        priv->cfg->lib->nic_config(priv);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_rx_init(trans(priv));

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_tx_init(priv))
                return -ENOMEM;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
        }

        set_bit(STATUS_INIT, &priv->shrd->status);

        return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
        int ret;

        iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           HW_READY_TIMEOUT);

        IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
        return ret;
}

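/*
 * iwl_poll_bit() returns a negative value on timeout and a non-negative
 * value on success (the elapsed poll time, in this driver's PCIe
 * implementation), which is why the callers here treat ret >= 0 as
 * "ready" rather than the usual 0-on-success kernel convention.
 */
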
/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
{
        int ret;

        IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_set_hw_ready(priv);
        if (ret >= 0)
                return 0;

        /* If HW is not ready, prepare the conditions to check again */
        iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PREPARE);

        ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
                           ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
        if (ret < 0)
                return ret;

        /* HW should be ready by now, check again. */
        ret = iwl_set_hw_ready(priv);
        if (ret >= 0)
                return 0;
        return ret;
}

static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
{
        int ret;

        priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

        if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
            iwl_trans_pcie_prepare_card_hw(priv)) {
                IWL_WARN(priv, "Exit HW not ready\n");
                return -EIO;
        }

        /* If platform's RF_KILL switch is NOT set to KILL */
        if (iwl_read32(priv, CSR_GP_CNTRL) &
                       CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
                clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
        else
                set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);

        if (iwl_is_rfkill(priv)) {
                wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
                iwl_enable_interrupts(priv);
                return -ERFKILL;
        }

        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

        ret = iwl_nic_init(priv);
        if (ret) {
                IWL_ERR(priv, "Unable to init nic\n");
                return ret;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
        iwl_enable_interrupts(priv);

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
 * Must be called under priv->shrd->lock and with MAC access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
        iwl_write_prph(priv, SCD_TXFACT, mask);
}

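/*
 * Each bit in the SCD_TXFACT mask enables one Tx DMA/FIFO channel, so
 * iwl_trans_txq_set_sched(priv, 0) disables everything while the
 * IWL_MASK(0, 7) used in iwl_trans_pcie_tx_start() below activates
 * channels 0 through 7 in a single write.
 */
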
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
        s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_BE_IPAN, 2, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

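/*
 * In both tables the array index is the hardware Tx queue number and the
 * entry gives the FIFO that queue drains into plus the mac80211 AC it
 * serves (IWL_AC_UNSET for queues with no AC, such as the command
 * queue). The BUILD_BUG_ONs in iwl_trans_pcie_tx_start() pin both tables
 * to exactly IWLAGN_FIRST_AMPDU_QUEUE entries.
 */
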
static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
{
        const struct queue_to_fifo_ac *queue_to_fifo;
        struct iwl_rxon_context *ctx;
        u32 a;
        unsigned long flags;
        int i, chan;
        u32 reg_val;

        spin_lock_irqsave(&priv->shrd->lock, flags);

        priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
        a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
        /* reset context data memory */
        for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
             a += 4)
                iwl_write_targ_mem(priv, a, 0);
        /* reset tx status memory */
        for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
             a += 4)
                iwl_write_targ_mem(priv, a, 0);
        for (; a < priv->scd_base_addr +
               SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
             a += 4)
                iwl_write_targ_mem(priv, a, 0);

        iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
                       priv->scd_bc_tbls.dma >> 10);

        /* Enable DMA channel */
        for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
                                   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                                   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

        /* Update FH chicken bits */
        reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
        iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
                           reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
                       SCD_QUEUECHAIN_SEL_ALL(priv));
        iwl_write_prph(priv, SCD_AGGR_SEL, 0);

        /* initiate the queues */
        for (i = 0; i < hw_params(priv).max_txq_num; i++) {
                iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
                iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                   SCD_CONTEXT_QUEUE_OFFSET(i) +
                                   sizeof(u32),
                                   ((SCD_WIN_SIZE <<
                                     SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                                    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                                   ((SCD_FRAME_LIMIT <<
                                     SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                                    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
        }

        iwl_write_prph(priv, SCD_INTERRUPT_MASK,
                       IWL_MASK(0, hw_params(priv).max_txq_num));

        /* Activate all Tx DMA/FIFO channels */
        iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

        /* map queues to FIFOs */
        if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
                queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
        else
                queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

        iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);

        /* make sure all queues are not stopped */
        memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
        for (i = 0; i < 4; i++)
                atomic_set(&priv->queue_stop_count[i], 0);
        for_each_context(priv, ctx)
                ctx->last_tx_rejected = false;

        /* reset to 0 to enable all the queues first */
        priv->txq_ctx_active_msk = 0;

        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
                     IWLAGN_FIRST_AMPDU_QUEUE);
        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
                     IWLAGN_FIRST_AMPDU_QUEUE);

        for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
                int fifo = queue_to_fifo[i].fifo;
                int ac = queue_to_fifo[i].ac;

                iwl_txq_ctx_activate(priv, i);

                if (fifo == IWL_TX_FIFO_UNUSED)
                        continue;

                if (ac != IWL_AC_UNSET)
                        iwl_set_swq_id(&priv->txq[i], ac, i);
                iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
        }

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        /* Enable L1-Active */
        iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
                            APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
        int ch, txq_id;
        unsigned long flags;

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->shrd->lock, flags);

        iwl_trans_txq_set_sched(priv, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                        FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                        1000))
                        IWL_ERR(priv, "Failing on timeout while stopping"
                                " DMA channel %d [0x%08x]", ch,
                                iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
        }
        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        if (!priv->txq) {
                IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
                return 0;
        }

        /* Unmap DMA from host system and free skb's */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
                iwl_tx_queue_unmap(priv, txq_id);

        return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
{
        unsigned long flags;

        /* stop and reset the on-board processor */
        iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

        /* tell the device to stop sending interrupts */
        spin_lock_irqsave(&priv->shrd->lock, flags);
        iwl_disable_interrupts(priv);
        spin_unlock_irqrestore(&priv->shrd->lock, flags);
        iwl_trans_sync_irq(trans(priv));

        /* device going down, Stop using ICT table */
        iwl_disable_ict(priv);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
                iwl_trans_tx_stop(priv);
                iwl_trans_rx_stop(priv);

                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(priv, APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_apm_stop(priv);
}

static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
                                                    int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd;

        if (unlikely(iwl_queue_space(q) < q->high_mark))
                return NULL;

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
        dev_cmd = txq->cmd[q->write_ptr];
        memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                INDEX_TO_SEQ(q->write_ptr)));
        return &dev_cmd->cmd.tx;
}

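/*
 * Illustration of the sequence encoding above (the exact shifts and
 * masks come from QUEUE_TO_SEQ/INDEX_TO_SEQ): with the queue number in
 * the high byte and the TFD index in the low byte, txq_id 2 and
 * write_ptr 5 would yield seq = (2 << 8) | 5 = 0x0205, which the Tx
 * response echoes back so the completion path can find this exact
 * queue slot.
 */
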
static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
                             struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc,
                             bool ampdu, struct iwl_rxon_context *ctx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr = 0;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, firstlen, secondlen;
        u8 wait_write_ptr = 0;
        u8 hdr_len = ieee80211_hdrlen(fc);

        /* Set up driver data for this TFD */
        memset(&txq->txb[q->write_ptr], 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb = skb;
        txq->txb[q->write_ptr].ctx = ctx;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->meta[q->write_ptr];

        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
              sizeof(struct iwl_cmd_header) + hdr_len;
        firstlen = (len + 3) & ~3;

        /* Tell NIC about any 2-byte padding after MAC header */
        if (firstlen != len)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = dma_map_single(priv->bus->dev,
                                    &dev_cmd->hdr, firstlen,
                                    DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
                return -1;
        dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
        dma_unmap_len_set(out_meta, len, firstlen);

        if (!ieee80211_has_morefrags(fc)) {
                txq->need_update = 1;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        secondlen = skb->len - hdr_len;
        if (secondlen > 0) {
                phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
                                           secondlen, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
                        dma_unmap_single(priv->bus->dev,
                                         dma_unmap_addr(out_meta, mapping),
                                         dma_unmap_len(out_meta, len),
                                         DMA_BIDIRECTIONAL);
                        return -1;
                }
        }

        /* Attach buffers to TFD */
        iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
        if (secondlen > 0)
                iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
                                             secondlen, 0);

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                       offsetof(struct iwl_tx_cmd, scratch);

        /* take back ownership of DMA buffer to enable update */
        dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
                                DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
                     le16_to_cpu(dev_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (ampdu)
                iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
                                                  le16_to_cpu(tx_cmd->len));

        dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
                                   DMA_BIDIRECTIONAL);

        trace_iwlwifi_dev_tx(priv,
                             &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
                             &dev_cmd->hdr, firstlen,
                             skb->data + hdr_len, secondlen);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually,
         * regardless of the value of ret. "ret" only indicates
         * whether or not we should update the write pointer.
         */
        if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                } else {
                        iwl_stop_queue(priv, txq);
                }
        }
        return 0;
}

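/*
 * The mapping scheme above gives every data frame two DMA segments: the
 * first covers the device command header, Tx command and MAC header
 * (mapped bidirectional, since the scratch pointers are patched in after
 * mapping), and the second covers the skb payload, if any. The
 * dma_sync_single_for_cpu()/_for_device() pair brackets the CPU updates
 * to the already-mapped first segment.
 */
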
static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
{
        /* Remove all resets to allow NIC to operate */
        iwl_write32(priv, CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_priv *priv = priv(trans);
        int err;

        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     iwl_irq_tasklet, (unsigned long)priv);

        iwl_alloc_isr_ict(priv);

        err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
                          DRV_NAME, priv);
        if (err) {
                IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
                iwl_free_isr_ict(priv);
                return err;
        }

        INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
        return 0;
}

static void iwl_trans_pcie_sync_irq(struct iwl_priv *priv)
{
        /* wait to make sure we flush pending tasklet */
        synchronize_irq(priv->bus->irq);
        tasklet_kill(&priv->irq_tasklet);
}

static void iwl_trans_pcie_free(struct iwl_priv *priv)
{
        free_irq(priv->bus->irq, priv);
        iwl_free_isr_ict(priv);
        kfree(trans(priv));
        trans(priv) = NULL;
}

const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
        struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
                                              sizeof(struct iwl_trans_pcie),
                                              GFP_KERNEL);

        if (iwl_trans) {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
                iwl_trans->ops = &trans_ops_pcie;
                iwl_trans->shrd = shrd;
                trans_pcie->trans = iwl_trans;
        }

        return iwl_trans;
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
        if (!debugfs_create_file(#name, mode, parent, trans,            \
                                 &iwl_dbgfs_##name##_ops))              \
                return -ENOMEM;                                         \
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
                                       char __user *user_buf,           \
                                       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
                                        const char __user *user_buf,    \
                                        size_t count, loff_t *ppos);

static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;
        return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)                                     \
        DEBUGFS_READ_FUNC(name);                                        \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = iwl_dbgfs_open_file_generic,                            \
        .llseek = generic_file_llseek,                                  \
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
        DEBUGFS_READ_FUNC(name);                                        \
        DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = iwl_dbgfs_open_file_generic,                            \
        .llseek = generic_file_llseek,                                  \
};

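/*
 * As a sketch of what the macros above generate,
 * DEBUGFS_READ_WRITE_FILE_OPS(traffic_log) expands to forward
 * declarations of iwl_dbgfs_traffic_log_read()/_write() plus a
 * struct file_operations iwl_dbgfs_traffic_log_ops wiring those
 * handlers to the generic open and llseek helpers.
 */
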
static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
                                          char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_priv *priv = priv(trans);
        int pos = 0, ofs = 0;
        int cnt = 0, entry;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        char *buf;
        int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
                    (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
        const u8 *ptr;
        ssize_t ret;

        if (!priv->txq) {
                IWL_ERR(trans, "txq not ready\n");
                return -EAGAIN;
        }
        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf) {
                IWL_ERR(trans, "Can not allocate buffer\n");
                return -ENOMEM;
        }
        pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
        for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
                txq = &priv->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "q[%d]: read_ptr: %u, write_ptr: %u\n",
                                 cnt, q->read_ptr, q->write_ptr);
        }
        if (priv->tx_traffic &&
            (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
                ptr = priv->tx_traffic;
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
                for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
                        for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
                             entry++, ofs += 16) {
                                pos += scnprintf(buf + pos, bufsz - pos,
                                                 "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
                                pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
                }
        }

        pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
        pos += scnprintf(buf + pos, bufsz - pos,
                         "read: %u, write: %u\n",
                         rxq->read, rxq->write);

        if (priv->rx_traffic &&
            (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
                ptr = priv->rx_traffic;
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
                for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
                        for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
                             entry++, ofs += 16) {
                                pos += scnprintf(buf + pos, bufsz - pos,
                                                 "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
                                pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
                }
        }

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}

static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
                                           const char __user *user_buf,
                                           size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char buf[8];
        int buf_size;
        int traffic_log;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%d", &traffic_log) != 1)
                return -EFAULT;
        if (traffic_log == 0)
                iwl_reset_traffic_log(priv(trans));

        return count;
}

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_priv *priv = priv(trans);
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        char *buf;
        int pos = 0;
        int cnt;
        int ret;
        const size_t bufsz = sizeof(char) * 64 *
                             priv->cfg->base_params->num_of_queues;

        if (!priv->txq) {
                IWL_ERR(priv, "txq not ready\n");
                return -EAGAIN;
        }
        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
                txq = &priv->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "hwq %.2d: read=%u write=%u stop=%d"
                                 " swq_id=%#.2x (ac %d/hwq %d)\n",
                                 cnt, q->read_ptr, q->write_ptr,
                                 !!test_bit(cnt, priv->queue_stopped),
                                 txq->swq_id, txq->swq_id & 3,
                                 (txq->swq_id >> 2) & 0x1f);
                if (cnt >= 4)
                        continue;
                /* for the ACs, display the stop count too */
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "        stop-count: %d\n",
                                 atomic_read(&priv->queue_stop_count[cnt]));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
        char buf[256];
        int pos = 0;
        const size_t bufsz = sizeof(buf);

        pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
                         rxq->read);
        pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
                         rxq->write);
        pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
                         rxq->free_count);
        if (rxq->rb_stts) {
                pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
                                 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
        } else {
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "closed_rb_num: Not Allocated\n");
        }
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)
{
        DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
        return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)
{ return 0; }
#endif /* CONFIG_IWLWIFI_DEBUGFS */

const struct iwl_trans_ops trans_ops_pcie = {
        .alloc = iwl_trans_pcie_alloc,
        .request_irq = iwl_trans_pcie_request_irq,
        .start_device = iwl_trans_pcie_start_device,
        .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
        .stop_device = iwl_trans_pcie_stop_device,

        .tx_start = iwl_trans_pcie_tx_start,

        .rx_free = iwl_trans_pcie_rx_free,
        .tx_free = iwl_trans_pcie_tx_free,

        .send_cmd = iwl_trans_pcie_send_cmd,
        .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

        .get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
        .tx = iwl_trans_pcie_tx,

        .txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
        .txq_agg_setup = iwl_trans_pcie_txq_agg_setup,

        .kick_nic = iwl_trans_pcie_kick_nic,

        .sync_irq = iwl_trans_pcie_sync_irq,
        .free = iwl_trans_pcie_free,

        .dbgfs_register = iwl_trans_pcie_dbgfs_register,
};
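
/*
 * Callers don't invoke these ops directly; they go through the
 * iwl_trans wrappers (see the iwl_trans_tx_free(trans(priv)) and
 * iwl_trans_sync_irq(trans(priv)) calls above), which dispatch to the
 * matching trans->ops hook and keep the upper layers bus-agnostic.
 */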