netxen_nic_ctx.c

/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 * info@netxen.com
 * NetXen Inc,
 * 18922 Forge Drive
 * Cupertino, CA 95014-0701
 *
 */

#include "netxen_nic_hw.h"
#include "netxen_nic.h"
#include "netxen_nic_phan_reg.h"

#define NXHAL_VERSION	1
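
/*
 * netxen_api_lock - grab PCIE hardware semaphore 5 so that only one
 * function issues a CDRP firmware command at a time.  Polls the lock
 * register up to NX_OS_CRB_RETRY_COUNT times, sleeping 1 ms between
 * attempts, and returns -1 on timeout.
 */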
static int
netxen_api_lock(struct netxen_adapter *adapter)
{
	u32 done = 0, timeout = 0;

	for (;;) {
		/* Acquire PCIE HW semaphore5 */
		done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_LOCK));

		if (done == 1)
			break;

		if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
			printk(KERN_ERR "%s: lock timeout.\n", __func__);
			return -1;
		}

		msleep(1);
	}

#if 0
	NXWR32(adapter,
		NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
#endif
	return 0;
}
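
/* netxen_api_unlock - release PCIE hardware semaphore 5 (a read unlocks it). */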
static int
netxen_api_unlock(struct netxen_adapter *adapter)
{
	/* Release PCIE HW semaphore5 */
	NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK));
	return 0;
}
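
/*
 * netxen_poll_rsp - wait for the firmware to post a CDRP response,
 * giving it at least 1 ms per poll and bailing out with
 * NX_CDRP_RSP_TIMEOUT after NX_OS_CRB_RETRY_COUNT attempts.
 */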
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
	u32 rsp = NX_CDRP_RSP_OK;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		msleep(1);

		if (++timeout > NX_OS_CRB_RETRY_COUNT)
			return NX_CDRP_RSP_TIMEOUT;

		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
	} while (!NX_CDRP_IS_RSP(rsp));

	return rsp;
}
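
/*
 * netxen_issue_cmd - issue one CDRP command to the firmware: take the
 * API lock, write the signature, the three arguments and the command
 * into the CRB mailbox registers, then poll for the response.  Returns
 * NX_RCODE_SUCCESS or the failure/timeout code reported by the card.
 */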
static u32
netxen_issue_cmd(struct netxen_adapter *adapter,
	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);

	/* Acquire semaphore before accessing CRB */
	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
	NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
	NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
	NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);

		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	}

	/* Release semaphore */
	netxen_api_unlock(adapter);

	return rcode;
}
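
/*
 * nx_fw_cmd_set_mtu - tell the firmware the new MTU for this receive
 * context.  Only issued while the context is active; returns -EIO if
 * the firmware rejects the request.
 */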
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
	u32 rcode = NX_RCODE_SUCCESS;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
		rcode = netxen_issue_cmd(adapter,
				adapter->ahw.pci_func,
				NXHAL_VERSION,
				recv_ctx->context_id,
				mtu,
				0,
				NX_CDRP_CMD_SET_MTU);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}
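
/*
 * nx_fw_cmd_create_rx_ctx - build a receive-context request (RDS and SDS
 * ring addresses, sizes and capability flags) in DMA-coherent memory,
 * hand it to the firmware with NX_CDRP_CMD_CREATE_RX_CTX, and record the
 * producer/consumer/interrupt-mask CRB offsets returned in the response.
 */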
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
	void *addr;
	nx_hostrq_rx_ctx_t *prq;
	nx_cardrsp_rx_ctx_t *prsp;
	nx_hostrq_rds_ring_t *prq_rds;
	nx_hostrq_sds_ring_t *prq_sds;
	nx_cardrsp_rds_ring_t *prsp_rds;
	nx_cardrsp_sds_ring_t *prsp_sds;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;
	int err;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
				rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = (nx_hostrq_rx_ctx_t *)addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = (nx_cardrsp_rx_ctx_t *)addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			(u32)(phys_addr >> 32),
			(u32)(phys_addr & 0xffffffff),
			rq_size,
			NX_CDRP_CMD_CREATE_RX_CTX);
	if (err) {
		printk(KERN_WARNING
			"Failed to create rx ctx in firmware %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((nx_cardrsp_rds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
	}

	prsp_sds = ((nx_cardrsp_sds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}
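
/* nx_fw_cmd_destroy_rx_ctx - ask the firmware to tear down the receive context. */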
static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			recv_ctx->context_id,
			NX_DESTROY_CTX_RESET,
			0,
			NX_CDRP_CMD_DESTROY_RX_CTX)) {

		printk(KERN_WARNING
			"%s: Failed to destroy rx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}
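
/*
 * nx_fw_cmd_create_tx_ctx - describe the transmit (CDS) ring, the dummy
 * DMA buffer and the command-consumer index to the firmware, issue
 * NX_CDRP_CMD_CREATE_TX_CTX, and save the producer CRB offset and
 * context id returned in the response.
 */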
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
	nx_hostrq_tx_ctx_t *prq;
	nx_hostrq_cds_ring_t *prq_cds;
	nx_cardrsp_tx_ctx_t *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	int err = 0;
	u64 offset, phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;

	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
	rq_addr = pci_alloc_consistent(adapter->pdev,
		rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
		rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = (nx_hostrq_tx_ctx_t *)rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;

	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

	offset = adapter->ctx_desc_phys_addr + sizeof(struct netxen_ring_ctx);
	prq->cmd_cons_dma_addr = cpu_to_le64(offset);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	err = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			(u32)(phys_addr >> 32),
			((u32)phys_addr & 0xffffffff),
			rq_size,
			NX_CDRP_CMD_CREATE_TX_CTX);

	if (err == NX_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
#if 0
		adapter->tx_state =
			le32_to_cpu(prsp->host_ctx_state);
#endif
		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		printk(KERN_WARNING
			"Failed to create tx ctx in firmware %d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
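
/* nx_fw_cmd_destroy_tx_ctx - ask the firmware to tear down the transmit context. */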
static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
	if (netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			adapter->tx_context_id,
			NX_DESTROY_CTX_RESET,
			0,
			NX_CDRP_CMD_DESTROY_TX_CTX)) {

		printk(KERN_WARNING
			"%s: Failed to destroy tx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}
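
/*
 * Legacy (pre-4.0 firmware) context setup: per-function CRB registers used
 * to pass the context descriptor address and signature to the card.  Each
 * row is {addr_lo, signature, addr_hi} for one PCI function, as selected
 * by the macros below.
 */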
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))

static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		NETXEN_NIC_REG(0x138),
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		NETXEN_NIC_REG(0x17c),
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		NETXEN_NIC_REG(0x220),
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		NETXEN_NIC_REG(0x264),
	},
};
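
/*
 * netxen_init_old_ctx - legacy context setup for firmware older than 4.0:
 * fill the shared ring-context descriptor with the tx, rds and status
 * ring addresses/sizes, then publish its DMA address and signature
 * through the per-function CRB registers above.
 */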
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int func_id = adapter->portnum;

	tx_ring = &adapter->tx_ring;

	adapter->ctx_desc->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	adapter->ctx_desc->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
			cpu_to_le64(rds_ring->phys_addr);
		adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
			cpu_to_le32(rds_ring->num_desc);
	}
	sds_ring = &recv_ctx->sds_rings[0];

	adapter->ctx_desc->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
	adapter->ctx_desc->sts_ring_size = cpu_to_le32(sds_ring->num_desc);

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(func_id),
			lower32(adapter->ctx_desc_phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(func_id),
			upper32(adapter->ctx_desc_phys_addr));
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(func_id),
			NETXEN_CTX_SIGNATURE | func_id);
	return 0;
}
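
/* Per-port software interrupt-mask CRB registers used by the legacy context. */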
static uint32_t sw_int_mask[4] = {
	CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
	CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
};
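
/*
 * netxen_alloc_hw_resources - allocate the DMA-coherent context descriptor,
 * tx descriptor ring and rx/status descriptor rings, then either create the
 * firmware contexts (fw >= 4.0) or program the legacy context registers
 * (fw < 4.0).  On failure the rings allocated so far are freed.
 */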
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;

	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;

	addr = pci_alloc_consistent(pdev,
			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
			&adapter->ctx_desc_phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate hw context\n");
		return -ENOMEM;
	}
	memset(addr, 0, sizeof(struct netxen_ring_ctx));
	adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
	adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum);
	adapter->ctx_desc->cmd_consumer_offset =
		cpu_to_le64(adapter->ctx_desc_phys_addr +
			sizeof(struct netxen_ring_ctx));
	tx_ring->hw_consumer =
		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

	/* cmd desc ring */
	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
				netdev->name);
		return -ENOMEM;
	}

	tx_ring->desc_head = (struct cmd_desc_type0 *)addr;

	recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = pci_alloc_consistent(adapter->pdev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate rds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = (struct rcv_desc *)addr;

		if (adapter->fw_major < 4)
			rds_ring->crb_rcv_producer =
				recv_crb_registers[adapter->portnum].
					crb_rcv_producer[ring];
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = pci_alloc_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate sds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = (struct status_desc *)addr;
	}

	if (adapter->fw_major >= 4) {
		err = nx_fw_cmd_create_rx_ctx(adapter);
		if (err)
			goto err_out_free;
		err = nx_fw_cmd_create_tx_ctx(adapter);
		if (err)
			goto err_out_free;
	} else {
		sds_ring = &recv_ctx->sds_rings[0];
		sds_ring->crb_sts_consumer =
			recv_crb_registers[adapter->portnum].crb_sts_consumer;

		recv_ctx->sds_rings[0].crb_intr_mask =
			sw_int_mask[adapter->portnum];

		err = netxen_init_old_ctx(adapter);
		if (err) {
			netxen_free_hw_resources(adapter);
			return err;
		}
	}

	return 0;

err_out_free:
	netxen_free_hw_resources(adapter);
	return err;
}
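
/*
 * netxen_free_hw_resources - undo netxen_alloc_hw_resources: destroy the
 * firmware contexts (fw >= 4.0) and free the context descriptor and every
 * descriptor ring that was allocated.
 */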
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	if (adapter->fw_major >= 4) {
		nx_fw_cmd_destroy_tx_ctx(adapter);
		nx_fw_cmd_destroy_rx_ctx(adapter);
	}

	if (adapter->ctx_desc != NULL) {
		pci_free_consistent(adapter->pdev,
				sizeof(struct netxen_ring_ctx) +
					sizeof(uint32_t),
				adapter->ctx_desc,
				adapter->ctx_desc_phys_addr);
		adapter->ctx_desc = NULL;
	}

	tx_ring = &adapter->tx_ring;
	if (tx_ring->desc_head != NULL) {
		pci_free_consistent(adapter->pdev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}