/* netxen_nic_ctx.c */
  1. /*
  2. * Copyright (C) 2003 - 2009 NetXen, Inc.
  3. * All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License
  7. * as published by the Free Software Foundation; either version 2
  8. * of the License, or (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  18. * MA 02111-1307, USA.
  19. *
  20. * The full GNU General Public License is included in this distribution
  21. * in the file called LICENSE.
  22. *
  23. * Contact Information:
  24. * info@netxen.com
  25. * NetXen Inc,
  26. * 18922 Forge Drive
  27. * Cupertino, CA 95014-0701
  28. *
  29. */
  30. #include "netxen_nic_hw.h"
  31. #include "netxen_nic.h"
  32. #include "netxen_nic_phan_reg.h"
  33. #define NXHAL_VERSION 1
  34. static int
  35. netxen_api_lock(struct netxen_adapter *adapter)
  36. {
  37. u32 done = 0, timeout = 0;
  38. for (;;) {
  39. /* Acquire PCIE HW semaphore5 */
  40. done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_LOCK));
  41. if (done == 1)
  42. break;
  43. if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
  44. printk(KERN_ERR "%s: lock timeout.\n", __func__);
  45. return -1;
  46. }
  47. msleep(1);
  48. }
  49. #if 0
  50. NXWR32(adapter,
  51. NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
  52. #endif
  53. return 0;
  54. }
  55. static int
  56. netxen_api_unlock(struct netxen_adapter *adapter)
  57. {
  58. /* Release PCIE HW semaphore5 */
  59. NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK));
  60. return 0;
  61. }
  62. static u32
  63. netxen_poll_rsp(struct netxen_adapter *adapter)
  64. {
  65. u32 rsp = NX_CDRP_RSP_OK;
  66. int timeout = 0;
  67. do {
  68. /* give atleast 1ms for firmware to respond */
  69. msleep(1);
  70. if (++timeout > NX_OS_CRB_RETRY_COUNT)
  71. return NX_CDRP_RSP_TIMEOUT;
  72. rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
  73. } while (!NX_CDRP_IS_RSP(rsp));
  74. return rsp;
  75. }
  76. static u32
  77. netxen_issue_cmd(struct netxen_adapter *adapter,
  78. u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
  79. {
  80. u32 rsp;
  81. u32 signature = 0;
  82. u32 rcode = NX_RCODE_SUCCESS;
  83. signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);
  84. /* Acquire semaphore before accessing CRB */
  85. if (netxen_api_lock(adapter))
  86. return NX_RCODE_TIMEOUT;
  87. NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
  88. NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
  89. NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
  90. NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
  91. NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));
  92. rsp = netxen_poll_rsp(adapter);
  93. if (rsp == NX_CDRP_RSP_TIMEOUT) {
  94. printk(KERN_ERR "%s: card response timeout.\n",
  95. netxen_nic_driver_name);
  96. rcode = NX_RCODE_TIMEOUT;
  97. } else if (rsp == NX_CDRP_RSP_FAIL) {
  98. rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
  99. printk(KERN_ERR "%s: failed card response code:0x%x\n",
  100. netxen_nic_driver_name, rcode);
  101. }
  102. /* Release semaphore */
  103. netxen_api_unlock(adapter);
  104. return rcode;
  105. }
  106. int
  107. nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
  108. {
  109. u32 rcode = NX_RCODE_SUCCESS;
  110. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  111. if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
  112. rcode = netxen_issue_cmd(adapter,
  113. adapter->ahw.pci_func,
  114. NXHAL_VERSION,
  115. recv_ctx->context_id,
  116. mtu,
  117. 0,
  118. NX_CDRP_CMD_SET_MTU);
  119. if (rcode != NX_RCODE_SUCCESS)
  120. return -EIO;
  121. return 0;
  122. }
  123. static int
  124. nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
  125. {
  126. void *addr;
  127. nx_hostrq_rx_ctx_t *prq;
  128. nx_cardrsp_rx_ctx_t *prsp;
  129. nx_hostrq_rds_ring_t *prq_rds;
  130. nx_hostrq_sds_ring_t *prq_sds;
  131. nx_cardrsp_rds_ring_t *prsp_rds;
  132. nx_cardrsp_sds_ring_t *prsp_sds;
  133. struct nx_host_rds_ring *rds_ring;
  134. struct nx_host_sds_ring *sds_ring;
  135. dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
  136. u64 phys_addr;
  137. int i, nrds_rings, nsds_rings;
  138. size_t rq_size, rsp_size;
  139. u32 cap, reg, val;
  140. int err;
  141. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  142. nrds_rings = adapter->max_rds_rings;
  143. nsds_rings = adapter->max_sds_rings;
  144. rq_size =
  145. SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
  146. rsp_size =
  147. SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
  148. addr = pci_alloc_consistent(adapter->pdev,
  149. rq_size, &hostrq_phys_addr);
  150. if (addr == NULL)
  151. return -ENOMEM;
  152. prq = (nx_hostrq_rx_ctx_t *)addr;
  153. addr = pci_alloc_consistent(adapter->pdev,
  154. rsp_size, &cardrsp_phys_addr);
  155. if (addr == NULL) {
  156. err = -ENOMEM;
  157. goto out_free_rq;
  158. }
  159. prsp = (nx_cardrsp_rx_ctx_t *)addr;
  160. prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
  161. cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
  162. cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
  163. if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
  164. cap |= NX_CAP0_HW_LRO;
  165. prq->capabilities[0] = cpu_to_le32(cap);
  166. prq->host_int_crb_mode =
  167. cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
  168. prq->host_rds_crb_mode =
  169. cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
  170. prq->num_rds_rings = cpu_to_le16(nrds_rings);
  171. prq->num_sds_rings = cpu_to_le16(nsds_rings);
  172. prq->rds_ring_offset = cpu_to_le32(0);
  173. val = le32_to_cpu(prq->rds_ring_offset) +
  174. (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
  175. prq->sds_ring_offset = cpu_to_le32(val);
  176. prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
  177. le32_to_cpu(prq->rds_ring_offset));
  178. for (i = 0; i < nrds_rings; i++) {
  179. rds_ring = &recv_ctx->rds_rings[i];
  180. prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
  181. prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
  182. prq_rds[i].ring_kind = cpu_to_le32(i);
  183. prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
  184. }
  185. prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
  186. le32_to_cpu(prq->sds_ring_offset));
  187. for (i = 0; i < nsds_rings; i++) {
  188. sds_ring = &recv_ctx->sds_rings[i];
  189. prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
  190. prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
  191. prq_sds[i].msi_index = cpu_to_le16(i);
  192. }
  193. phys_addr = hostrq_phys_addr;
  194. err = netxen_issue_cmd(adapter,
  195. adapter->ahw.pci_func,
  196. NXHAL_VERSION,
  197. (u32)(phys_addr >> 32),
  198. (u32)(phys_addr & 0xffffffff),
  199. rq_size,
  200. NX_CDRP_CMD_CREATE_RX_CTX);
  201. if (err) {
  202. printk(KERN_WARNING
  203. "Failed to create rx ctx in firmware%d\n", err);
  204. goto out_free_rsp;
  205. }
  206. prsp_rds = ((nx_cardrsp_rds_ring_t *)
  207. &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
  208. for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
  209. rds_ring = &recv_ctx->rds_rings[i];
  210. reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
  211. rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
  212. }
  213. prsp_sds = ((nx_cardrsp_sds_ring_t *)
  214. &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
  215. for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
  216. sds_ring = &recv_ctx->sds_rings[i];
  217. reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
  218. sds_ring->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);
  219. reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
  220. sds_ring->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
  221. }
  222. recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
  223. recv_ctx->context_id = le16_to_cpu(prsp->context_id);
  224. recv_ctx->virt_port = prsp->virt_port;
  225. out_free_rsp:
  226. pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
  227. out_free_rq:
  228. pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
  229. return err;
  230. }
  231. static void
  232. nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
  233. {
  234. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  235. if (netxen_issue_cmd(adapter,
  236. adapter->ahw.pci_func,
  237. NXHAL_VERSION,
  238. recv_ctx->context_id,
  239. NX_DESTROY_CTX_RESET,
  240. 0,
  241. NX_CDRP_CMD_DESTROY_RX_CTX)) {
  242. printk(KERN_WARNING
  243. "%s: Failed to destroy rx ctx in firmware\n",
  244. netxen_nic_driver_name);
  245. }
  246. }
  247. static int
  248. nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
  249. {
  250. nx_hostrq_tx_ctx_t *prq;
  251. nx_hostrq_cds_ring_t *prq_cds;
  252. nx_cardrsp_tx_ctx_t *prsp;
  253. void *rq_addr, *rsp_addr;
  254. size_t rq_size, rsp_size;
  255. u32 temp;
  256. int err = 0;
  257. u64 offset, phys_addr;
  258. dma_addr_t rq_phys_addr, rsp_phys_addr;
  259. struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
  260. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  261. rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
  262. rq_addr = pci_alloc_consistent(adapter->pdev,
  263. rq_size, &rq_phys_addr);
  264. if (!rq_addr)
  265. return -ENOMEM;
  266. rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
  267. rsp_addr = pci_alloc_consistent(adapter->pdev,
  268. rsp_size, &rsp_phys_addr);
  269. if (!rsp_addr) {
  270. err = -ENOMEM;
  271. goto out_free_rq;
  272. }
  273. memset(rq_addr, 0, rq_size);
  274. prq = (nx_hostrq_tx_ctx_t *)rq_addr;
  275. memset(rsp_addr, 0, rsp_size);
  276. prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
  277. prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
  278. temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
  279. prq->capabilities[0] = cpu_to_le32(temp);
  280. prq->host_int_crb_mode =
  281. cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
  282. prq->interrupt_ctl = 0;
  283. prq->msi_index = 0;
  284. prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
  285. offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
  286. prq->cmd_cons_dma_addr = cpu_to_le64(offset);
  287. prq_cds = &prq->cds_ring;
  288. prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
  289. prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
  290. phys_addr = rq_phys_addr;
  291. err = netxen_issue_cmd(adapter,
  292. adapter->ahw.pci_func,
  293. NXHAL_VERSION,
  294. (u32)(phys_addr >> 32),
  295. ((u32)phys_addr & 0xffffffff),
  296. rq_size,
  297. NX_CDRP_CMD_CREATE_TX_CTX);
  298. if (err == NX_RCODE_SUCCESS) {
  299. temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
  300. tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
  301. #if 0
  302. adapter->tx_state =
  303. le32_to_cpu(prsp->host_ctx_state);
  304. #endif
  305. adapter->tx_context_id =
  306. le16_to_cpu(prsp->context_id);
  307. } else {
  308. printk(KERN_WARNING
  309. "Failed to create tx ctx in firmware%d\n", err);
  310. err = -EIO;
  311. }
  312. pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
  313. out_free_rq:
  314. pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
  315. return err;
  316. }
  317. static void
  318. nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
  319. {
  320. if (netxen_issue_cmd(adapter,
  321. adapter->ahw.pci_func,
  322. NXHAL_VERSION,
  323. adapter->tx_context_id,
  324. NX_DESTROY_CTX_RESET,
  325. 0,
  326. NX_CDRP_CMD_DESTROY_TX_CTX)) {
  327. printk(KERN_WARNING
  328. "%s: Failed to destroy tx ctx in firmware\n",
  329. netxen_nic_driver_name);
  330. }
  331. }
/*
 * Per-PCI-function CRB registers for the legacy (P2) context handshake.
 * Rows are indexed by function id. NOTE(review): per the accessor macros
 * below, column [0] is the context-address low register, column [1] the
 * signature register, and column [2] the context-address high register —
 * confirm against the NetXen register map.
 */
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

/* split a 64-bit DMA address into 32-bit halves for CRB writes */
#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
/*
 * Fixed CRB register assignments for legacy (P2) receive paths, one entry
 * per port instance. Each entry lists, per ring, the receive-descriptor
 * producer registers, the status-descriptor consumer registers, and the
 * software interrupt mask registers. NOTE(review): instances 2 and 3
 * repeat NETXEN_NIC_REG_2(0x03c) for rings 1-3 — presumably placeholders
 * for rings those instances do not use; verify against the register map.
 */
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};
  445. static int
  446. netxen_init_old_ctx(struct netxen_adapter *adapter)
  447. {
  448. struct netxen_recv_context *recv_ctx;
  449. struct nx_host_rds_ring *rds_ring;
  450. struct nx_host_sds_ring *sds_ring;
  451. struct nx_host_tx_ring *tx_ring;
  452. int ring;
  453. int port = adapter->portnum;
  454. struct netxen_ring_ctx *hwctx;
  455. u32 signature;
  456. tx_ring = adapter->tx_ring;
  457. recv_ctx = &adapter->recv_ctx;
  458. hwctx = recv_ctx->hwctx;
  459. hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
  460. hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
  461. for (ring = 0; ring < adapter->max_rds_rings; ring++) {
  462. rds_ring = &recv_ctx->rds_rings[ring];
  463. hwctx->rcv_rings[ring].addr =
  464. cpu_to_le64(rds_ring->phys_addr);
  465. hwctx->rcv_rings[ring].size =
  466. cpu_to_le32(rds_ring->num_desc);
  467. }
  468. for (ring = 0; ring < adapter->max_sds_rings; ring++) {
  469. sds_ring = &recv_ctx->sds_rings[ring];
  470. if (ring == 0) {
  471. hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
  472. hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
  473. }
  474. hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
  475. hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
  476. hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
  477. }
  478. hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
  479. signature = (adapter->max_sds_rings > 1) ?
  480. NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
  481. NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
  482. lower32(recv_ctx->phys_addr));
  483. NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
  484. upper32(recv_ctx->phys_addr));
  485. NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
  486. signature | port);
  487. return 0;
  488. }
  489. int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
  490. {
  491. void *addr;
  492. int err = 0;
  493. int ring;
  494. struct netxen_recv_context *recv_ctx;
  495. struct nx_host_rds_ring *rds_ring;
  496. struct nx_host_sds_ring *sds_ring;
  497. struct nx_host_tx_ring *tx_ring;
  498. struct pci_dev *pdev = adapter->pdev;
  499. struct net_device *netdev = adapter->netdev;
  500. int port = adapter->portnum;
  501. recv_ctx = &adapter->recv_ctx;
  502. tx_ring = adapter->tx_ring;
  503. addr = pci_alloc_consistent(pdev,
  504. sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
  505. &recv_ctx->phys_addr);
  506. if (addr == NULL) {
  507. dev_err(&pdev->dev, "failed to allocate hw context\n");
  508. return -ENOMEM;
  509. }
  510. memset(addr, 0, sizeof(struct netxen_ring_ctx));
  511. recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
  512. recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
  513. recv_ctx->hwctx->cmd_consumer_offset =
  514. cpu_to_le64(recv_ctx->phys_addr +
  515. sizeof(struct netxen_ring_ctx));
  516. tx_ring->hw_consumer =
  517. (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
  518. /* cmd desc ring */
  519. addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
  520. &tx_ring->phys_addr);
  521. if (addr == NULL) {
  522. dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
  523. netdev->name);
  524. return -ENOMEM;
  525. }
  526. tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
  527. for (ring = 0; ring < adapter->max_rds_rings; ring++) {
  528. rds_ring = &recv_ctx->rds_rings[ring];
  529. addr = pci_alloc_consistent(adapter->pdev,
  530. RCV_DESC_RINGSIZE(rds_ring),
  531. &rds_ring->phys_addr);
  532. if (addr == NULL) {
  533. dev_err(&pdev->dev,
  534. "%s: failed to allocate rds ring [%d]\n",
  535. netdev->name, ring);
  536. err = -ENOMEM;
  537. goto err_out_free;
  538. }
  539. rds_ring->desc_head = (struct rcv_desc *)addr;
  540. if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
  541. rds_ring->crb_rcv_producer =
  542. recv_crb_registers[port].crb_rcv_producer[ring];
  543. }
  544. for (ring = 0; ring < adapter->max_sds_rings; ring++) {
  545. sds_ring = &recv_ctx->sds_rings[ring];
  546. addr = pci_alloc_consistent(adapter->pdev,
  547. STATUS_DESC_RINGSIZE(sds_ring),
  548. &sds_ring->phys_addr);
  549. if (addr == NULL) {
  550. dev_err(&pdev->dev,
  551. "%s: failed to allocate sds ring [%d]\n",
  552. netdev->name, ring);
  553. err = -ENOMEM;
  554. goto err_out_free;
  555. }
  556. sds_ring->desc_head = (struct status_desc *)addr;
  557. sds_ring->crb_sts_consumer =
  558. recv_crb_registers[port].crb_sts_consumer[ring];
  559. sds_ring->crb_intr_mask =
  560. recv_crb_registers[port].sw_int_mask[ring];
  561. }
  562. if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
  563. err = nx_fw_cmd_create_rx_ctx(adapter);
  564. if (err)
  565. goto err_out_free;
  566. err = nx_fw_cmd_create_tx_ctx(adapter);
  567. if (err)
  568. goto err_out_free;
  569. } else {
  570. err = netxen_init_old_ctx(adapter);
  571. if (err)
  572. goto err_out_free;
  573. }
  574. return 0;
  575. err_out_free:
  576. netxen_free_hw_resources(adapter);
  577. return err;
  578. }
  579. void netxen_free_hw_resources(struct netxen_adapter *adapter)
  580. {
  581. struct netxen_recv_context *recv_ctx;
  582. struct nx_host_rds_ring *rds_ring;
  583. struct nx_host_sds_ring *sds_ring;
  584. struct nx_host_tx_ring *tx_ring;
  585. int ring;
  586. int port = adapter->portnum;
  587. if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
  588. nx_fw_cmd_destroy_rx_ctx(adapter);
  589. nx_fw_cmd_destroy_tx_ctx(adapter);
  590. } else {
  591. netxen_api_lock(adapter);
  592. NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
  593. NETXEN_CTX_D3_RESET | port);
  594. netxen_api_unlock(adapter);
  595. }
  596. /* Allow dma queues to drain after context reset */
  597. msleep(20);
  598. recv_ctx = &adapter->recv_ctx;
  599. if (recv_ctx->hwctx != NULL) {
  600. pci_free_consistent(adapter->pdev,
  601. sizeof(struct netxen_ring_ctx) +
  602. sizeof(uint32_t),
  603. recv_ctx->hwctx,
  604. recv_ctx->phys_addr);
  605. recv_ctx->hwctx = NULL;
  606. }
  607. tx_ring = adapter->tx_ring;
  608. if (tx_ring->desc_head != NULL) {
  609. pci_free_consistent(adapter->pdev,
  610. TX_DESC_RINGSIZE(tx_ring),
  611. tx_ring->desc_head, tx_ring->phys_addr);
  612. tx_ring->desc_head = NULL;
  613. }
  614. for (ring = 0; ring < adapter->max_rds_rings; ring++) {
  615. rds_ring = &recv_ctx->rds_rings[ring];
  616. if (rds_ring->desc_head != NULL) {
  617. pci_free_consistent(adapter->pdev,
  618. RCV_DESC_RINGSIZE(rds_ring),
  619. rds_ring->desc_head,
  620. rds_ring->phys_addr);
  621. rds_ring->desc_head = NULL;
  622. }
  623. }
  624. for (ring = 0; ring < adapter->max_sds_rings; ring++) {
  625. sds_ring = &recv_ctx->sds_rings[ring];
  626. if (sds_ring->desc_head != NULL) {
  627. pci_free_consistent(adapter->pdev,
  628. STATUS_DESC_RINGSIZE(sds_ring),
  629. sds_ring->desc_head,
  630. sds_ring->phys_addr);
  631. sds_ring->desc_head = NULL;
  632. }
  633. }
  634. }