iwl-tx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
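
/*
 * Illustration (not in the original file): QoS TIDs 0-7 map pairwise onto
 * the four EDCA FIFOs -- e.g. best-effort TIDs 0 and 3 share
 * IWL_TX_FIFO_AC1 while background TIDs 1 and 2 share IWL_TX_FIFO_AC0 --
 * TIDs 8-15 are unused here, and the final entry appears to serve
 * non-QoS traffic. A bounds-checked lookup, mirroring iwl_tx_agg_start()
 * below:
 *
 *	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
 *		tx_fifo = default_tid_to_tx_fifo[tid];
 *	else
 *		return -EINVAL;
 */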

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter = 0;
	int index, is_odd;

	/* Host command buffers stay mapped in memory, nothing to clean */
	if (txq->q.id == IWL_CMD_QUEUE_NUM)
		return 0;

	/* Sanity check on number of chunks */
	counter = IWL_GET_BITS(*bd, num_tbs);
	if (counter > MAX_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", counter);
		/* @todo Issue a fatal error here; this is quite a serious
		 * situation */
		return 0;
	}

	/* Unmap chunks, if any.
	 * TFD info for odd chunks is in a different format than for even
	 * chunks. */
	for (i = 0; i < counter; i++) {
		index = i / 2;
		is_odd = i & 0x1;

		if (is_odd)
			pci_unmap_single(
				dev,
				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
				(IWL_GET_BITS(bd->pa[index],
					      tb2_addr_hi20) << 16),
				IWL_GET_BITS(bd->pa[index], tb2_len),
				PCI_DMA_TODEVICE);
		else if (i > 0)
			pci_unmap_single(dev,
					 le32_to_cpu(bd->pa[index].tb1_addr),
					 IWL_GET_BITS(bd->pa[index], tb1_len),
					 PCI_DMA_TODEVICE);

		/* Free SKB, if any, for this chunk */
		if (txq->txb[txq->q.read_ptr].skb[i]) {
			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];

			dev_kfree_skb(skb);
			txq->txb[txq->q.read_ptr].skb[i] = NULL;
		}
	}
	return 0;
}
EXPORT_SYMBOL(iwl_hw_txq_free_tfd);

int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
				 dma_addr_t addr, u16 len)
{
	int index, is_odd;
	struct iwl_tfd_frame *tfd = ptr;
	u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
	/* Each TFD can point to a maximum of 20 Tx buffers.
	 * (num_tbs is unsigned, so a "< 0" check would be dead code.) */
	if (num_tbs >= MAX_NUM_OF_TBS) {
		IWL_ERROR("Error, cannot send more than %d chunks\n",
			  MAX_NUM_OF_TBS);
		return -EINVAL;
	}
	index = num_tbs / 2;
	is_odd = num_tbs & 0x1;

	if (!is_odd) {
		tfd->pa[index].tb1_addr = cpu_to_le32(addr);
		IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
			     iwl_get_dma_hi_address(addr));
		IWL_SET_BITS(tfd->pa[index], tb1_len, len);
	} else {
		IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
			     (u32) (addr & 0xffff));
		IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
		IWL_SET_BITS(tfd->pa[index], tb2_len, len);
	}

	IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);

	return 0;
}
EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
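
/*
 * Usage sketch (illustrative, not from the original): iwl_tx_skb() below
 * builds a two-chunk TFD this way -- chunk 0 holds the Tx command plus
 * the copied MAC header, chunk 1 points directly at the frame payload:
 *
 *	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, cmd_len);
 *	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, payload_phys, payload_len);
 */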

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		if (ret)
			return ret;
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, slots_num, len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
	for (i = 0; i < slots_num; i++)
		kfree(txq->cmd[i]);
	if (txq_id == IWL_CMD_QUEUE_NUM)
		kfree(txq->cmd[slots_num]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
				    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
 * space becomes > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
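
/*
 * Illustration (not part of the original file): the wrap helpers this
 * file relies on live in iwl-helpers.h. Assuming power-of-two queue
 * sizes, minimal sketches of such helpers would look like the
 * hypothetically named functions below.
 */
static inline int example_queue_inc_wrap(int index, int n_bd)
{
	return ++index == n_bd ? 0 : index;	/* wrap back to entry 0 */
}

static inline int example_queue_dec_wrap(int index, int n_bd)
{
	return --index < 0 ? n_bd - 1 : index;	/* wrap back to the end */
}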

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;

	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
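
/*
 * Worked example (illustrative; assumes a data Tx queue where
 * n_window == n_bd == 256): with read_ptr = 10 and write_ptr = 250,
 * s starts at 10 - 250 = -240; read_ptr <= write_ptr, so no n_bd
 * correction applies; s <= 0, so s += 256 gives 16; subtracting the
 * reserve of 2 leaves 14 usable slots before the queue must be stopped.
 */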

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->bd = pci_alloc_consistent(dev,
			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);
	if (!txq->bd) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
{
	int rc;
	unsigned long flags;
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	/* Enable DMA channel, using same id as for TFD queue */
	iwl_write_direct32(
		priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
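
/*
 * Note (illustrative, inferred from the ">> 8" above rather than stated
 * in the original): FH_MEM_CBBC_QUEUE evidently holds the TFD
 * circular-buffer base address in 256-byte units, so a DMA base of,
 * say, 0x1f00000 would be programmed as 0x1f000.
 */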

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space plus one big
	 * command for scan, since the scan command is very large; the system
	 * never runs two scans at the same time, so only one such buffer is
	 * needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd);
	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id == IWL_CMD_QUEUE_NUM)
				len += IWL_MAX_SCAN_SIZE;
			else
				continue;
		}

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++) {
		kfree(txq->cmd[i]);
		txq->cmd[i] = NULL;
	}

	if (txq_id == IWL_CMD_QUEUE_NUM) {
		kfree(txq->cmd[slots_num]);
		txq->cmd[slots_num] = NULL;
	}

	return -ENOMEM;
}

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_free(priv, txq_id);

	/* Keep-warm buffer */
	iwl_kw_free(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialises them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	iwl_kw_free(priv);

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	/* Alloc keep-warm buffer */
	ret = iwl_kw_alloc(priv);
	if (ret) {
		IWL_ERROR("Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Tell nic where to find the keep-warm buffer */
	ret = iwl_kw_init(priv);
	if (ret) {
		IWL_ERROR("kw_init failed\n");
		goto error_reset;
	}

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
 error_reset:
	iwl_kw_free(priv);
 error_kw:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_write_direct32(priv,
				   FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				    (txq_id), 200);
	}
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Build the basic, rate-independent part of the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   int is_unicast, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
{
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;
	u16 rate_flags = 0;
	int rate_idx;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - ctl, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}
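
/*
 * Illustration (not in the original): IEEE80211_FCTL_FTYPE is 0x000c, so
 * the two frame-type bits shift down to an index of 0 for management
 * frames, 1 for control frames, and 2 for data frames; e.g. a data frame
 * (fc type bits 10) yields (0x0008 >> 2) == 2.
 */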

/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd_frame *tfd;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_cmd *out_cmd;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, idx, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len, unicast;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if (!priv->vif) {
		IWL_DEBUG_DROP("Dropping - !priv->vif\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	unicast = !is_multicast_ether_addr(hdr->addr1);

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif
	/* drop all data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc));

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		DECLARE_MAC_BUF(mac);

		IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
			       print_mac(mac, hdr->addr1));
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	swq_id = skb_get_queue_mapping(skb);
	txq_id = swq_id;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	}

	/* Descriptor for chosen Tx queue */
	txq = &priv->txq[txq_id];
	q = &txq->q;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));
	idx = get_cmd_index(q, q->write_ptr, 0);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[idx];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
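	/*
	 * Illustration (assumes the usual layout of the sequence field,
	 * with the queue id in bits 8..12 and the TFD index in bits 0..7):
	 * queue 3 with write_ptr 12 would encode as 0x030c.
	 */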
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;
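
	/*
	 * Worked example (illustrative): if the Tx command plus MAC header
	 * comes to, say, 62 bytes, then (62 + 3) & ~3 rounds it up to 64;
	 * len_org becomes 1, and TX_CMD_FLG_MH_PAD_MSK is set further down
	 * to tell the device about the 2 bytes of header padding.
	 */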
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
				    sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
	txcmd_phys += offsetof(struct iwl_cmd, hdr);

	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			ieee80211_stop_queue(priv->hw, swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd_frame *tfd;
	struct iwl_cmd *out_cmd;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command,
	 * then we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL\n");
		return -EIO;
	}
	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
	len = (idx == TFD_CMD_SLOTS) ?
			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
	phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
				   PCI_DMA_TODEVICE);
	phys_addr += offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
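
/*
 * Usage sketch (illustrative; field names follow struct iwl_host_cmd as
 * consumed above -- callers normally go through the iwl_send_cmd*
 * wrappers rather than calling this directly):
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_TX,		(any ucode command id)
 *		.len = sizeof(payload),
 *		.data = &payload,
 *	};
 *	int idx = iwl_enqueue_hcmd(priv, &cmd);
 *	if (idx < 0)
 *		... queue full or RF-kill; nothing was enqueued ...
 */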

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd_frame *bd = &txq->bd[index];
	dma_addr_t dma_addr;
	int is_odd, buf_len;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed > 1) {
			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
				  q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}
		is_odd = (index / 2) & 0x1;
		if (is_odd) {
			dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
				   (IWL_GET_BITS(bd->pa[index],
						 tb2_addr_hi20) << 16);
			buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
		} else {
			dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
			buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
		}

		pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
				 PCI_DMA_TODEVICE);
		nfreed++;
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int huge = sequence & SEQ_HUGE_FRAME;
	int cmd_index;
	struct iwl_cmd *cmd;
	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
			  txq_id, pkt->hdr.cmd);
	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;
	DECLARE_MAC_BUF(mac);

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARNING("%s on ra = %s tid = %d\n",
		    __func__, print_mac(mac, ra), tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		printk(KERN_ERR "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	if (!ra) {
		IWL_ERROR("ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT("HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERROR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0)	/* tbd: something is wrong with indices */
		sh += 0x100;
	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}
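
/*
 * Worked example (illustrative): if the driver's Tx window starts at
 * agg->start_idx = 9 while the BA's seq_ctl >> 4 maps to index 5, then
 * sh = 4 and the device's bitmap is shifted right by 4 so that bit 0
 * lines up with the first frame of our window; ANDing with agg->bitmap
 * then leaves only the frames we actually attempted.
 */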

/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	int index;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	DECLARE_MAC_BUF(mac);

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */
	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %s, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   print_mac(mac, (u8 *) &ba_resp->sta_addr_lo32),
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int ampdu_q =
		   scd_flow - priv->hw_params.first_ampdu_q + priv->hw->queues;
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[ba_resp->sta_id].
			tid[ba_resp->tid].tfds_in_queue -= freed;
		if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
		    priv->mac80211_registered &&
		    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
			ieee80211_wake_queue(priv->hw, ampdu_q);

		iwl_txq_check_empty(priv, ba_resp->sta_id,
				    ba_resp->tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */