i40e_adminq.c

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (hw->mac.type == I40E_MAC_VF) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.asq.desc = hw->aq.asq_mem.va;
	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;

	/* also allocate the per-descriptor command details array */
	ret_code = i40e_allocate_virt_mem(hw, &mem,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
		hw->aq.asq_mem.va = NULL;
		hw->aq.asq_mem.pa = 0;
		return ret_code;
	}
	hw->aq.asq.details = mem.va;

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.arq.desc = hw->aq.arq_mem.va;
	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;

	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
	hw->aq.asq_mem.va = NULL;
	hw->aq.asq_mem.pa = 0;

	mem.va = hw->aq.asq.details;
	i40e_free_virt_mem(hw, &mem);
	hw->aq.asq.details = NULL;
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
	hw->aq.arq_mem.va = NULL;
	hw->aq.arq_mem.pa = 0;
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* now free the buffer info list */
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
					   I40E_VF_ATQLEN1_ATQENABLE_MASK));
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	}
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
					   I40E_VF_ARQLEN1_ARQENABLE_MASK));
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	}

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_asq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_arq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	if (hw->mac.type == I40E_MAC_VF)
		wr32(hw, I40E_VF_ATQLEN1, 0);
	else
		wr32(hw, I40E_PF_ATQLEN, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.asq_mutex);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_asq(hw);

	mutex_unlock(&hw->aq.asq_mutex);

	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	if (hw->mac.type == I40E_MAC_VF)
		wr32(hw, I40E_VF_ARQLEN1, 0);
	else
		wr32(hw, I40E_PF_ARQLEN, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.arq_mutex);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_arq(hw);

	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	u16 eetrack_lo, eetrack_hi;
	i40e_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize locks */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	ret_code = i40e_aq_get_firmware_version(hw,
				     &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
				     &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
				     NULL);
	if (ret_code)
		goto init_adminq_free_arq;

	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}
	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	/* the HMC profile request is best-effort; ignore its result */
	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the locks */

	return ret_code;
}

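/* A minimal bring-up sketch (illustrative comment only, not driver code):
 * a probe-time caller is expected to size both queues before calling
 * i40e_init_adminq() and to pair it with i40e_shutdown_adminq() on
 * teardown.  The entry counts below and the use of I40E_MAX_AQ_BUF_SIZE
 * (assumed to come from i40e_adminq.h) are illustrative choices, not
 * requirements of this file:
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *
 *	err = i40e_init_adminq(hw);
 *	if (err)
 *		goto probe_failed;
 *	...
 *	i40e_shutdown_adminq(hw);
 */
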
/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue.  Returns false if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details, used in async cleanup
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue.  It runs the queue, cleans the queue, etc
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	mutex_lock(&hw->aq.asq_mutex);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;
		u32 delay_len = 10;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			udelay(delay_len);
			total_delay += delay_len;
		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);
			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
	return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
}

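/* A usage sketch (illustrative comment only) for a direct, buffer-less
 * command, assuming the caller holds a hw struct with an initialized
 * admin queue.  The 0x0001 opcode value is a hypothetical placeholder,
 * not a real AQ opcode; an indirect command would instead pass a buffer
 * and its size so the command data rides in a pre-mapped DMA buffer:
 *
 *	struct i40e_aq_desc desc;
 *	i40e_status status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, 0x0001);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status)
 *		return status;
 */
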
/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e.  It can also return how many events are
 * left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40e_debug_aq(hw,
		      I40E_DEBUG_AQ_COMMAND,
		      (void *)desc,
		      hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
		datalen = le16_to_cpu(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

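/* A polling sketch (illustrative comment only) for draining the ARQ, e.g.
 * from a driver service task.  The msg_buf backing store and its buf_len
 * capacity are owned by the caller and assumed here; each call consumes
 * one event and reports how many more are pending:
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	do {
 *		event.msg_size = buf_len;
 *		event.msg_buf = buf;
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;
 *		... process event.desc and event.msg_buf ...
 *	} while (pending);
 */

/**
 * i40e_resume_aq - resume AQ processing after a PF reset
 * @hw: pointer to the hardware structure
 *
 * A PF reset clears the AQ registers, so reprogram the base, length and
 * enable registers for both queues, reusing the rings that were allocated
 * at init time.
 **/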
void i40e_resume_aq(struct i40e_hw *hw)
{
	u32 reg = 0;

	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);
	reg = hw->aq.num_asq_entries;

	if (hw->mac.type == I40E_MAC_VF) {
		reg |= I40E_VF_ATQLEN1_ATQENABLE_MASK;
		wr32(hw, I40E_VF_ATQLEN1, reg);
	} else {
		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
		wr32(hw, I40E_PF_ATQLEN, reg);
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
	reg = hw->aq.num_arq_entries;

	if (hw->mac.type == I40E_MAC_VF) {
		reg |= I40E_VF_ARQLEN1_ARQENABLE_MASK;
		wr32(hw, I40E_VF_ARQLEN1, reg);
	} else {
		reg |= I40E_PF_ARQLEN_ARQENABLE_MASK;
		wr32(hw, I40E_PF_ARQLEN, reg);
	}
}