qlcnic_ctx.c

/*
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include "qlcnic.h"

static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		msleep(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}

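/*
 * qlcnic_issue_cmd() implements the CDRP (card/driver request-response)
 * mailbox: the caller's signature (PCI function + HAL version) and up to
 * three arguments are written to the SIGN/ARG1-3 CRB registers, the command
 * code is posted to the CDRP register, and the firmware's response is polled
 * via qlcnic_poll_rsp(). On QLCNIC_CDRP_RSP_FAIL the failure code is read
 * back from the ARG1 register. The CRB semaphore (qlcnic_api_lock) serializes
 * access to these registers across PCI functions.
 */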
u32
qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
	u32 rsp;
	u32 signature;
	u32 rcode = QLCNIC_RCODE_SUCCESS;
	struct pci_dev *pdev = adapter->pdev;

	signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter))
		return QLCNIC_RCODE_TIMEOUT;

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
	QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
	QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));

	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		rcode = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		dev_err(&pdev->dev, "failed card response code:0x%x\n",
				rcode);
	}

	/* Release semaphore */
	qlcnic_api_unlock(adapter);

	return rcode;
}

int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
		if (qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			recv_ctx->context_id,
			mtu,
			0,
			QLCNIC_CDRP_CMD_SET_MTU)) {

			dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
			return -EIO;
		}
	}

	return 0;
}

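/*
 * Rx context creation: the host builds a qlcnic_hostrq_rx_ctx request in
 * DMA-coherent memory, with one qlcnic_hostrq_rds_ring entry per receive
 * (RDS) ring and one qlcnic_hostrq_sds_ring entry per status (SDS) ring
 * appended at rds_ring_offset/sds_ring_offset. The request's physical
 * address is handed to firmware split into high/low 32-bit arguments, and
 * firmware fills the card response buffer with the CRB offsets the host
 * must use for ring producers, consumers and interrupt masks.
 */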
static int
qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	void *addr;
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	int err;

	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
						nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
						nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
				rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = (struct qlcnic_hostrq_rx_ctx *)addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
						| QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
							msix_handler);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			(u32)(phys_addr >> 32),
			(u32)(phys_addr & 0xffffffff),
			rq_size,
			QLCNIC_CDRP_CMD_CREATE_RX_CTX);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}

static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			recv_ctx->context_id,
			QLCNIC_DESTROY_CTX_RESET,
			0,
			QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {

		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");
	}

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
}

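/*
 * Tx context creation mirrors the Rx path: the command (CDS) ring base
 * address and the DMA address of the hardware consumer index are passed
 * to firmware in a qlcnic_hostrq_tx_ctx request, and on success the
 * returned producer CRB offset and Tx context id are recorded in the
 * adapter.
 */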
static int
qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	int err;
	u64 phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = pci_alloc_consistent(adapter->pdev,
		rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
		rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
					QLCNIC_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			(u32)(phys_addr >> 32),
			((u32)phys_addr & 0xffffffff),
			rq_size,
			QLCNIC_CDRP_CMD_CREATE_TX_CTX);

	if (err == QLCNIC_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp;

		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to create tx ctx in firmware%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

static void
qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
{
	if (qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			adapter->tx_context_id,
			QLCNIC_DESTROY_CTX_RESET,
			0,
			QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {

		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
	}
}

int
qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
{
	if (qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			reg,
			0,
			0,
			QLCNIC_CDRP_CMD_READ_PHY)) {

		return -EIO;
	}

	return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
}

int
qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
{
	return qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			reg,
			val,
			0,
			QLCNIC_CDRP_CMD_WRITE_PHY);
}

int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err;
	int ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = &adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
						&tx_ring->hw_cons_phys_addr);
	if (tx_ring->hw_consumer == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx consumer\n");
		return -ENOMEM;
	}
	*(tx_ring->hw_consumer) = 0;

	/* cmd desc ring */
	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = (struct cmd_desc_type0 *)addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = pci_alloc_consistent(adapter->pdev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate rds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = (struct rcv_desc *)addr;
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = pci_alloc_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate sds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = (struct status_desc *)addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}

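/*
 * qlcnic_fw_create_ctx()/qlcnic_fw_destroy_ctx() bracket firmware context
 * ownership: creation first performs any pending function-level reset, then
 * creates the Rx context followed by the Tx context (tearing the Rx context
 * down again if the Tx context fails) and sets the __QLCNIC_FW_ATTACHED
 * state bit that destruction tests and clears.
 */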
int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
	int err;

	if (adapter->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(adapter->pdev);
		adapter->flags &= ~QLCNIC_NEED_FLR;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
	if (err)
		return err;

	err = qlcnic_fw_cmd_create_tx_ctx(adapter);
	if (err) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		return err;
	}

	set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
	return 0;
}

void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		qlcnic_fw_cmd_destroy_tx_ctx(adapter);

		/* Allow dma queues to drain after context reset */
		msleep(20);
	}
}

void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = &adapter->recv_ctx;

	tx_ring = adapter->tx_ring;
	if (tx_ring->hw_consumer != NULL) {
		pci_free_consistent(adapter->pdev,
				sizeof(u32),
				tx_ring->hw_consumer,
				tx_ring->hw_cons_phys_addr);
		tx_ring->hw_consumer = NULL;
	}

	if (tx_ring->desc_head != NULL) {
		pci_free_consistent(adapter->pdev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}

/* Get MAC address of a NIC partition */
int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
	int err;
	u32 arg1;

	arg1 = adapter->ahw.pci_func | BIT_8;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			arg1,
			0,
			0,
			QLCNIC_CDRP_CMD_MAC_ADDRESS);

	if (err == QLCNIC_RCODE_SUCCESS)
		qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
				QLCNIC_ARG2_CRB_OFFSET, 0, mac);
	else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address%d\n", err);
		err = -EIO;
	}

	return err;
}

/* Get info of a NIC partition */
int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
				struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	struct qlcnic_info *nic_info;
	void *nic_info_addr;
	size_t nic_size = sizeof(struct qlcnic_info);

	nic_info_addr = pci_alloc_consistent(adapter->pdev,
		nic_size, &nic_dma_t);
	if (!nic_info_addr)
		return -ENOMEM;
	memset(nic_info_addr, 0, nic_size);

	nic_info = (struct qlcnic_info *) nic_info_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			MSD(nic_dma_t),
			LSD(nic_dma_t),
			(func_id << 16 | nic_size),
			QLCNIC_CDRP_CMD_GET_NIC_INFO);

	if (err == QLCNIC_RCODE_SUCCESS) {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);

		dev_info(&adapter->pdev->dev,
			"phy port: %d switch_mode: %d,\n"
			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
			"\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
			npar_info->phys_port, npar_info->switch_mode,
			npar_info->max_tx_ques, npar_info->max_rx_ques,
			npar_info->min_tx_bw, npar_info->max_tx_bw,
			npar_info->max_mtu, npar_info->capabilities);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
	return err;
}

/* Configure a NIC partition */
int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_info *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info);

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
			&nic_dma_t);
	if (!nic_info_addr)
		return -ENOMEM;

	memset(nic_info_addr, 0, nic_size);
	nic_info = (struct qlcnic_info *)nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			MSD(nic_dma_t),
			LSD(nic_dma_t),
			((nic->pci_func << 16) | nic_size),
			QLCNIC_CDRP_CMD_SET_NIC_INFO);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
	return err;
}

/* Get PCI Info of a partition */
int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
				struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
			&pci_info_dma_t);
	if (!pci_info_addr)
		return -ENOMEM;
	memset(pci_info_addr, 0, pci_size);

	npar = (struct qlcnic_pci_info *) pci_info_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			MSD(pci_info_dma_t),
			LSD(pci_info_dma_t),
			pci_size,
			QLCNIC_CDRP_CMD_GET_PCI_INFO);

	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
		pci_info_dma_t);
	return err;
}

/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
				u8 enable_mirroring, u8 pci_func)
{
	int err = -EIO;
	u32 arg1;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			arg1,
			0,
			0,
			QLCNIC_CDRP_CMD_SET_PORTMIRRORING);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure port mirroring%d on eswitch:%d\n",
			pci_func, id);
	} else {
		dev_info(&adapter->pdev->dev,
			"Configured eSwitch %d for port mirroring:%d\n",
			id, pci_func);
	}

	return err;
}

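/*
 * eSwitch statistics commands pack their parameters into arg1: bits 0-7
 * carry the function or port number, bits 8-11 the statistics version,
 * the query type (QLCNIC_STATS_PORT or QLCNIC_STATS_ESWITCH) is shifted
 * to bit 12, bit 14 requests a clear (see qlcnic_clear_esw_stats), bit 15
 * selects rx vs. tx counters, and bits 16-31 hold the size of the DMA
 * buffer that firmware fills with the counters.
 */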
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
	struct __qlcnic_esw_statistics *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
	    func != adapter->ahw.pci_func) {
		dev_err(&adapter->pdev->dev,
			"Not privilege to query stats for func=%d", func);
		return -EIO;
	}

	stats_addr = pci_alloc_consistent(adapter->pdev, stats_size,
			&stats_dma_t);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			arg1,
			MSD(stats_dma_t),
			LSD(stats_dma_t),
			QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);

	if (!err) {
		stats = (struct __qlcnic_esw_statistics *)stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
				le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
				le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	pci_free_consistent(adapter->pdev, stats_size, stats_addr,
		stats_dma_t);
	return err;
}

int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	memset(esw_stats, 0, sizeof(u64));
	esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
					port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
					port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
					port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
					port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
					port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
					port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
					port_stats.numbytes);
		ret = 0;
	}
	return ret;
}

int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
		const u8 port, const u8 rx_tx)
{
	u32 arg1;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= QLCNIC_MAX_PCI_FUNC)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	return qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			arg1,
			0,
			0,
			QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);

err_ret:
	dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
		"rx_ctx=%d\n", func_esw, port, rx_tx);
	return -EIO;
}

static int
__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
					u32 *arg1, u32 *arg2)
{
	int err = -EIO;
	u8 pci_func;

	pci_func = (*arg1 >> 8);
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			*arg1,
			0,
			0,
			QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);

	if (err == QLCNIC_RCODE_SUCCESS) {
		*arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		*arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
		dev_info(&adapter->pdev->dev,
			"eSwitch port config for pci func %d\n", pci_func);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get eswitch port config for pci func %d\n",
			pci_func);
	}
	return err;
}

/* Configure eSwitch port
 * op_mode = 0 for setting default port behavior
 * op_mode = 1 for setting vlan id
 * op_mode = 2 for deleting vlan id
 * op_type = 0 for vlan_id
 * op_type = 1 for port vlan_id
 */
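/*
 * The firmware encodes the per-function port configuration in two 32-bit
 * words: arg1 carries the physical port in bit 0, add/delete-VLAN requests
 * in bits 2-3, the discard-tagged, host-vlan-tag, promiscuous-mode and
 * mac-override flags in bits 4-7, the pci function in bits 8-15 and the
 * vlan id in bits 16-31; arg2 carries mac-anti-spoof in bit 0 and the
 * offload flags in bits 1-3 (see qlcnic_get_eswitch_port_config below).
 */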
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	int err = -EIO;
	u32 arg1, arg2 = 0;
	u8 pci_func;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	pci_func = esw_cfg->pci_func;
	arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_2;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_3;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		return err;
	}

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			arg1,
			arg2,
			0,
			QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure eswitch pci func %d\n", pci_func);
	} else {
		dev_info(&adapter->pdev->dev,
			"Configured eSwitch for pci func %d\n", pci_func);
	}

	return err;
}

int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	u8 phy_port;

	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
	else
		phy_port = adapter->physical_port;
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}