/* netxen_nic_ctx.c */
/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 * info@netxen.com
 * NetXen Inc,
 * 18922 Forge Drive
 * Cupertino, CA 95014-0701
 *
 */
  30. #include "netxen_nic_hw.h"
  31. #include "netxen_nic.h"
  32. #define NXHAL_VERSION 1
  33. static int
  34. netxen_api_lock(struct netxen_adapter *adapter)
  35. {
  36. u32 done = 0, timeout = 0;
  37. for (;;) {
  38. /* Acquire PCIE HW semaphore5 */
  39. done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_LOCK));
  40. if (done == 1)
  41. break;
  42. if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
  43. printk(KERN_ERR "%s: lock timeout.\n", __func__);
  44. return -1;
  45. }
  46. msleep(1);
  47. }
  48. #if 0
  49. NXWR32(adapter,
  50. NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
  51. #endif
  52. return 0;
  53. }
/*
 * netxen_api_unlock() - release the firmware API (CDRP) hardware lock.
 * @adapter: adapter whose PCIE HW semaphore5 is released.
 *
 * Reading the unlock register releases PCIE HW semaphore5; the value
 * read back is intentionally discarded. Always returns 0.
 */
static int
netxen_api_unlock(struct netxen_adapter *adapter)
{
	/* Release PCIE HW semaphore5 */
	NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK));
	return 0;
}
  61. static u32
  62. netxen_poll_rsp(struct netxen_adapter *adapter)
  63. {
  64. u32 rsp = NX_CDRP_RSP_OK;
  65. int timeout = 0;
  66. do {
  67. /* give atleast 1ms for firmware to respond */
  68. msleep(1);
  69. if (++timeout > NX_OS_CRB_RETRY_COUNT)
  70. return NX_CDRP_RSP_TIMEOUT;
  71. rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
  72. } while (!NX_CDRP_IS_RSP(rsp));
  73. return rsp;
  74. }
/*
 * netxen_issue_cmd() - issue one CDRP command to firmware and wait for reply.
 * @adapter: adapter to talk to.
 * @pci_fn:  PCI function number, folded into the CDRP signature.
 * @version: protocol version (NXHAL_VERSION), also part of the signature.
 * @arg1, @arg2, @arg3: command-specific arguments written to the ARG CRBs.
 * @cmd:     NX_CDRP_CMD_* opcode.
 *
 * Serializes firmware access with the semaphore5 API lock, writes the
 * signature/argument registers, then kicks the command register and polls
 * for completion.
 *
 * Returns NX_RCODE_SUCCESS, NX_RCODE_TIMEOUT (lock or response timeout),
 * or the failure code the firmware left in the ARG1 CRB.
 */
static u32
netxen_issue_cmd(struct netxen_adapter *adapter,
	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);

	/* Acquire semaphore before accessing CRB */
	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	/* Arguments must be in place before the command register write:
	 * writing NX_CDRP_CRB_OFFSET is what submits the command. */
	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
	NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
	NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
	NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);
		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		/* on failure the firmware reports the reason in ARG1 */
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	}

	/* Release semaphore */
	netxen_api_unlock(adapter);

	return rcode;
}
  105. int
  106. nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
  107. {
  108. u32 rcode = NX_RCODE_SUCCESS;
  109. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  110. if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
  111. rcode = netxen_issue_cmd(adapter,
  112. adapter->ahw.pci_func,
  113. NXHAL_VERSION,
  114. recv_ctx->context_id,
  115. mtu,
  116. 0,
  117. NX_CDRP_CMD_SET_MTU);
  118. if (rcode != NX_RCODE_SUCCESS)
  119. return -EIO;
  120. return 0;
  121. }
  122. static int
  123. nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
  124. {
  125. void *addr;
  126. nx_hostrq_rx_ctx_t *prq;
  127. nx_cardrsp_rx_ctx_t *prsp;
  128. nx_hostrq_rds_ring_t *prq_rds;
  129. nx_hostrq_sds_ring_t *prq_sds;
  130. nx_cardrsp_rds_ring_t *prsp_rds;
  131. nx_cardrsp_sds_ring_t *prsp_sds;
  132. struct nx_host_rds_ring *rds_ring;
  133. struct nx_host_sds_ring *sds_ring;
  134. dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
  135. u64 phys_addr;
  136. int i, nrds_rings, nsds_rings;
  137. size_t rq_size, rsp_size;
  138. u32 cap, reg, val;
  139. int err;
  140. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  141. nrds_rings = adapter->max_rds_rings;
  142. nsds_rings = adapter->max_sds_rings;
  143. rq_size =
  144. SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
  145. rsp_size =
  146. SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
  147. addr = pci_alloc_consistent(adapter->pdev,
  148. rq_size, &hostrq_phys_addr);
  149. if (addr == NULL)
  150. return -ENOMEM;
  151. prq = (nx_hostrq_rx_ctx_t *)addr;
  152. addr = pci_alloc_consistent(adapter->pdev,
  153. rsp_size, &cardrsp_phys_addr);
  154. if (addr == NULL) {
  155. err = -ENOMEM;
  156. goto out_free_rq;
  157. }
  158. prsp = (nx_cardrsp_rx_ctx_t *)addr;
  159. prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
  160. cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
  161. cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
  162. prq->capabilities[0] = cpu_to_le32(cap);
  163. prq->host_int_crb_mode =
  164. cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
  165. prq->host_rds_crb_mode =
  166. cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
  167. prq->num_rds_rings = cpu_to_le16(nrds_rings);
  168. prq->num_sds_rings = cpu_to_le16(nsds_rings);
  169. prq->rds_ring_offset = cpu_to_le32(0);
  170. val = le32_to_cpu(prq->rds_ring_offset) +
  171. (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
  172. prq->sds_ring_offset = cpu_to_le32(val);
  173. prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
  174. le32_to_cpu(prq->rds_ring_offset));
  175. for (i = 0; i < nrds_rings; i++) {
  176. rds_ring = &recv_ctx->rds_rings[i];
  177. prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
  178. prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
  179. prq_rds[i].ring_kind = cpu_to_le32(i);
  180. prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
  181. }
  182. prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
  183. le32_to_cpu(prq->sds_ring_offset));
  184. for (i = 0; i < nsds_rings; i++) {
  185. sds_ring = &recv_ctx->sds_rings[i];
  186. prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
  187. prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
  188. prq_sds[i].msi_index = cpu_to_le16(i);
  189. }
  190. phys_addr = hostrq_phys_addr;
  191. err = netxen_issue_cmd(adapter,
  192. adapter->ahw.pci_func,
  193. NXHAL_VERSION,
  194. (u32)(phys_addr >> 32),
  195. (u32)(phys_addr & 0xffffffff),
  196. rq_size,
  197. NX_CDRP_CMD_CREATE_RX_CTX);
  198. if (err) {
  199. printk(KERN_WARNING
  200. "Failed to create rx ctx in firmware%d\n", err);
  201. goto out_free_rsp;
  202. }
  203. prsp_rds = ((nx_cardrsp_rds_ring_t *)
  204. &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
  205. for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
  206. rds_ring = &recv_ctx->rds_rings[i];
  207. reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
  208. rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
  209. }
  210. prsp_sds = ((nx_cardrsp_sds_ring_t *)
  211. &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
  212. for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
  213. sds_ring = &recv_ctx->sds_rings[i];
  214. reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
  215. sds_ring->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);
  216. reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
  217. sds_ring->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
  218. }
  219. recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
  220. recv_ctx->context_id = le16_to_cpu(prsp->context_id);
  221. recv_ctx->virt_port = prsp->virt_port;
  222. out_free_rsp:
  223. pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
  224. out_free_rq:
  225. pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
  226. return err;
  227. }
  228. static void
  229. nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
  230. {
  231. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  232. if (netxen_issue_cmd(adapter,
  233. adapter->ahw.pci_func,
  234. NXHAL_VERSION,
  235. recv_ctx->context_id,
  236. NX_DESTROY_CTX_RESET,
  237. 0,
  238. NX_CDRP_CMD_DESTROY_RX_CTX)) {
  239. printk(KERN_WARNING
  240. "%s: Failed to destroy rx ctx in firmware\n",
  241. netxen_nic_driver_name);
  242. }
  243. }
  244. static int
  245. nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
  246. {
  247. nx_hostrq_tx_ctx_t *prq;
  248. nx_hostrq_cds_ring_t *prq_cds;
  249. nx_cardrsp_tx_ctx_t *prsp;
  250. void *rq_addr, *rsp_addr;
  251. size_t rq_size, rsp_size;
  252. u32 temp;
  253. int err = 0;
  254. u64 offset, phys_addr;
  255. dma_addr_t rq_phys_addr, rsp_phys_addr;
  256. struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
  257. struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
  258. rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
  259. rq_addr = pci_alloc_consistent(adapter->pdev,
  260. rq_size, &rq_phys_addr);
  261. if (!rq_addr)
  262. return -ENOMEM;
  263. rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
  264. rsp_addr = pci_alloc_consistent(adapter->pdev,
  265. rsp_size, &rsp_phys_addr);
  266. if (!rsp_addr) {
  267. err = -ENOMEM;
  268. goto out_free_rq;
  269. }
  270. memset(rq_addr, 0, rq_size);
  271. prq = (nx_hostrq_tx_ctx_t *)rq_addr;
  272. memset(rsp_addr, 0, rsp_size);
  273. prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
  274. prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
  275. temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
  276. prq->capabilities[0] = cpu_to_le32(temp);
  277. prq->host_int_crb_mode =
  278. cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
  279. prq->interrupt_ctl = 0;
  280. prq->msi_index = 0;
  281. prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
  282. offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
  283. prq->cmd_cons_dma_addr = cpu_to_le64(offset);
  284. prq_cds = &prq->cds_ring;
  285. prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
  286. prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
  287. phys_addr = rq_phys_addr;
  288. err = netxen_issue_cmd(adapter,
  289. adapter->ahw.pci_func,
  290. NXHAL_VERSION,
  291. (u32)(phys_addr >> 32),
  292. ((u32)phys_addr & 0xffffffff),
  293. rq_size,
  294. NX_CDRP_CMD_CREATE_TX_CTX);
  295. if (err == NX_RCODE_SUCCESS) {
  296. temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
  297. tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
  298. #if 0
  299. adapter->tx_state =
  300. le32_to_cpu(prsp->host_ctx_state);
  301. #endif
  302. adapter->tx_context_id =
  303. le16_to_cpu(prsp->context_id);
  304. } else {
  305. printk(KERN_WARNING
  306. "Failed to create tx ctx in firmware%d\n", err);
  307. err = -EIO;
  308. }
  309. pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
  310. out_free_rq:
  311. pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
  312. return err;
  313. }
  314. static void
  315. nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
  316. {
  317. if (netxen_issue_cmd(adapter,
  318. adapter->ahw.pci_func,
  319. NXHAL_VERSION,
  320. adapter->tx_context_id,
  321. NX_DESTROY_CTX_RESET,
  322. 0,
  323. NX_CDRP_CMD_DESTROY_TX_CTX)) {
  324. printk(KERN_WARNING
  325. "%s: Failed to destroy tx ctx in firmware\n",
  326. netxen_nic_driver_name);
  327. }
  328. }
/*
 * Per-PCI-function CRB registers used by the legacy (P2) context handshake.
 * Indexed by function id; per the accessor macros below the columns are
 * [0] = context address low, [1] = signature, [2] = context address high.
 * NOTE(review): the signature register is the MIDDLE column, not the last
 * one — keep the macro indices in sync with this table.
 */
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

/* split a 64-bit bus address into 32-bit halves for CRB writes */
#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
/*
 * Legacy (P2) per-port receive CRB register map, indexed by port number.
 * For each port: the rds producer registers (normal/jumbo/LRO rings), the
 * sds consumer registers, and the software interrupt mask registers.
 * NOTE(review): instances 2 and 3 repeat NETXEN_NIC_REG_2(0x03c) for the
 * secondary sds/mask slots — presumably placeholders, since P2 hardware
 * appears to use only the first entry on those ports; confirm before
 * relying on multi-ring operation there.
 */
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};
/*
 * netxen_init_old_ctx() - program the legacy (P2) hardware context.
 * @adapter: adapter whose context block is populated and announced.
 *
 * Fills the host-resident netxen_ring_ctx structure with the bus
 * addresses/sizes of the tx, rds and sds rings, then writes the context
 * block's bus address and a signature into the per-port CRB registers.
 *
 * Always returns 0.
 */
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int port = adapter->portnum;
	struct netxen_ring_ctx *hwctx;
	u32 signature;

	tx_ring = adapter->tx_ring;
	recv_ctx = &adapter->recv_ctx;
	hwctx = recv_ctx->hwctx;

	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		hwctx->rcv_rings[ring].addr =
			cpu_to_le64(rds_ring->phys_addr);
		hwctx->rcv_rings[ring].size =
			cpu_to_le32(rds_ring->num_desc);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		/* ring 0 is mirrored into the legacy single-ring fields */
		if (ring == 0) {
			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
		}
		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
	}
	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

	/* V2 signature advertises multiple status rings */
	signature = (adapter->max_sds_rings > 1) ?
		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
			lower32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
			upper32(recv_ctx->phys_addr));
	/* NOTE(review): signature appears deliberately written after the
	 * address registers — keep this ordering. */
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
			signature | port);
	return 0;
}
  486. int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
  487. {
  488. void *addr;
  489. int err = 0;
  490. int ring;
  491. struct netxen_recv_context *recv_ctx;
  492. struct nx_host_rds_ring *rds_ring;
  493. struct nx_host_sds_ring *sds_ring;
  494. struct nx_host_tx_ring *tx_ring;
  495. struct pci_dev *pdev = adapter->pdev;
  496. struct net_device *netdev = adapter->netdev;
  497. int port = adapter->portnum;
  498. recv_ctx = &adapter->recv_ctx;
  499. tx_ring = adapter->tx_ring;
  500. addr = pci_alloc_consistent(pdev,
  501. sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
  502. &recv_ctx->phys_addr);
  503. if (addr == NULL) {
  504. dev_err(&pdev->dev, "failed to allocate hw context\n");
  505. return -ENOMEM;
  506. }
  507. memset(addr, 0, sizeof(struct netxen_ring_ctx));
  508. recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
  509. recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
  510. recv_ctx->hwctx->cmd_consumer_offset =
  511. cpu_to_le64(recv_ctx->phys_addr +
  512. sizeof(struct netxen_ring_ctx));
  513. tx_ring->hw_consumer =
  514. (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
  515. /* cmd desc ring */
  516. addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
  517. &tx_ring->phys_addr);
  518. if (addr == NULL) {
  519. dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
  520. netdev->name);
  521. return -ENOMEM;
  522. }
  523. tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
  524. for (ring = 0; ring < adapter->max_rds_rings; ring++) {
  525. rds_ring = &recv_ctx->rds_rings[ring];
  526. addr = pci_alloc_consistent(adapter->pdev,
  527. RCV_DESC_RINGSIZE(rds_ring),
  528. &rds_ring->phys_addr);
  529. if (addr == NULL) {
  530. dev_err(&pdev->dev,
  531. "%s: failed to allocate rds ring [%d]\n",
  532. netdev->name, ring);
  533. err = -ENOMEM;
  534. goto err_out_free;
  535. }
  536. rds_ring->desc_head = (struct rcv_desc *)addr;
  537. if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
  538. rds_ring->crb_rcv_producer =
  539. recv_crb_registers[port].crb_rcv_producer[ring];
  540. }
  541. for (ring = 0; ring < adapter->max_sds_rings; ring++) {
  542. sds_ring = &recv_ctx->sds_rings[ring];
  543. addr = pci_alloc_consistent(adapter->pdev,
  544. STATUS_DESC_RINGSIZE(sds_ring),
  545. &sds_ring->phys_addr);
  546. if (addr == NULL) {
  547. dev_err(&pdev->dev,
  548. "%s: failed to allocate sds ring [%d]\n",
  549. netdev->name, ring);
  550. err = -ENOMEM;
  551. goto err_out_free;
  552. }
  553. sds_ring->desc_head = (struct status_desc *)addr;
  554. sds_ring->crb_sts_consumer =
  555. recv_crb_registers[port].crb_sts_consumer[ring];
  556. sds_ring->crb_intr_mask =
  557. recv_crb_registers[port].sw_int_mask[ring];
  558. }
  559. if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
  560. err = nx_fw_cmd_create_rx_ctx(adapter);
  561. if (err)
  562. goto err_out_free;
  563. err = nx_fw_cmd_create_tx_ctx(adapter);
  564. if (err)
  565. goto err_out_free;
  566. } else {
  567. err = netxen_init_old_ctx(adapter);
  568. if (err)
  569. goto err_out_free;
  570. }
  571. return 0;
  572. err_out_free:
  573. netxen_free_hw_resources(adapter);
  574. return err;
  575. }
  576. void netxen_free_hw_resources(struct netxen_adapter *adapter)
  577. {
  578. struct netxen_recv_context *recv_ctx;
  579. struct nx_host_rds_ring *rds_ring;
  580. struct nx_host_sds_ring *sds_ring;
  581. struct nx_host_tx_ring *tx_ring;
  582. int ring;
  583. int port = adapter->portnum;
  584. if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
  585. nx_fw_cmd_destroy_rx_ctx(adapter);
  586. nx_fw_cmd_destroy_tx_ctx(adapter);
  587. } else {
  588. netxen_api_lock(adapter);
  589. NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
  590. NETXEN_CTX_D3_RESET | port);
  591. netxen_api_unlock(adapter);
  592. }
  593. /* Allow dma queues to drain after context reset */
  594. msleep(20);
  595. recv_ctx = &adapter->recv_ctx;
  596. if (recv_ctx->hwctx != NULL) {
  597. pci_free_consistent(adapter->pdev,
  598. sizeof(struct netxen_ring_ctx) +
  599. sizeof(uint32_t),
  600. recv_ctx->hwctx,
  601. recv_ctx->phys_addr);
  602. recv_ctx->hwctx = NULL;
  603. }
  604. tx_ring = adapter->tx_ring;
  605. if (tx_ring->desc_head != NULL) {
  606. pci_free_consistent(adapter->pdev,
  607. TX_DESC_RINGSIZE(tx_ring),
  608. tx_ring->desc_head, tx_ring->phys_addr);
  609. tx_ring->desc_head = NULL;
  610. }
  611. for (ring = 0; ring < adapter->max_rds_rings; ring++) {
  612. rds_ring = &recv_ctx->rds_rings[ring];
  613. if (rds_ring->desc_head != NULL) {
  614. pci_free_consistent(adapter->pdev,
  615. RCV_DESC_RINGSIZE(rds_ring),
  616. rds_ring->desc_head,
  617. rds_ring->phys_addr);
  618. rds_ring->desc_head = NULL;
  619. }
  620. }
  621. for (ring = 0; ring < adapter->max_sds_rings; ring++) {
  622. sds_ring = &recv_ctx->sds_rings[ring];
  623. if (sds_ring->desc_head != NULL) {
  624. pci_free_consistent(adapter->pdev,
  625. STATUS_DESC_RINGSIZE(sds_ring),
  626. sds_ring->desc_head,
  627. sds_ring->phys_addr);
  628. sds_ring->desc_head = NULL;
  629. }
  630. }
  631. }