netxen_nic_ctx.c

/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 * info@netxen.com
 * NetXen Inc,
 * 18922 Forge Drive
 * Cupertino, CA 95014-0701
 *
 */
#include "netxen_nic_hw.h"
#include "netxen_nic.h"
#include "netxen_nic_phan_reg.h"

#define NXHAL_VERSION	1
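
/*
 * Access to the firmware command (CDRP) CRB registers is serialized
 * with PCIe hardware semaphore 5: reading the LOCK register acquires
 * the semaphore (returns 1 once held), reading the UNLOCK register
 * releases it.
 */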
static int
netxen_api_lock(struct netxen_adapter *adapter)
{
	u32 done = 0, timeout = 0;

	for (;;) {
		/* Acquire PCIE HW semaphore5 */
		done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_LOCK));

		if (done == 1)
			break;

		if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
			printk(KERN_ERR "%s: lock timeout.\n", __func__);
			return -1;
		}

		msleep(1);
	}

#if 0
	NXWR32(adapter,
		NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
#endif
	return 0;
}
static int
netxen_api_unlock(struct netxen_adapter *adapter)
{
	/* Release PCIE HW semaphore5 */
	NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK));
	return 0;
}
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
	u32 rsp = NX_CDRP_RSP_OK;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		msleep(1);

		if (++timeout > NX_OS_CRB_RETRY_COUNT)
			return NX_CDRP_RSP_TIMEOUT;

		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
	} while (!NX_CDRP_IS_RSP(rsp));

	return rsp;
}
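
/*
 * Issue one firmware command: write the caller's signature, the three
 * command arguments and finally the command code into the CDRP CRB
 * registers, then poll the CDRP register until the firmware posts a
 * response or the retry count expires.
 */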
static u32
netxen_issue_cmd(struct netxen_adapter *adapter,
	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);

	/* Acquire semaphore before accessing CRB */
	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
	NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
	NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
	NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);

		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	}

	/* Release semaphore */
	netxen_api_unlock(adapter);

	return rcode;
}
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
	u32 rcode = NX_RCODE_SUCCESS;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
		rcode = netxen_issue_cmd(adapter,
				adapter->ahw.pci_func,
				NXHAL_VERSION,
				recv_ctx->context_id,
				mtu,
				0,
				NX_CDRP_CMD_SET_MTU);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}
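
/*
 * Build the receive-context request in DMA-coherent memory: the fixed
 * header is followed by one descriptor per RDS (receive) ring and one
 * per SDS (status) ring.  The request and a response buffer are handed
 * to the firmware with NX_CDRP_CMD_CREATE_RX_CTX; on success the
 * response supplies the CRB offsets for the ring producer/consumer and
 * interrupt-mask registers, plus the context id and state.
 */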
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
	void *addr;
	nx_hostrq_rx_ctx_t *prq;
	nx_cardrsp_rx_ctx_t *prsp;
	nx_hostrq_rds_ring_t *prq_rds;
	nx_hostrq_sds_ring_t *prq_sds;
	nx_cardrsp_rds_ring_t *prsp_rds;
	nx_cardrsp_sds_ring_t *prsp_sds;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;

	int err;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
				rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = (nx_hostrq_rx_ctx_t *)addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = (nx_cardrsp_rx_ctx_t *)addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			(u32)(phys_addr >> 32),
			(u32)(phys_addr & 0xffffffff),
			rq_size,
			NX_CDRP_CMD_CREATE_RX_CTX);
	if (err) {
		printk(KERN_WARNING
			"Failed to create rx ctx in firmware %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((nx_cardrsp_rds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
	}

	prsp_sds = ((nx_cardrsp_sds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}
static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			recv_ctx->context_id,
			NX_DESTROY_CTX_RESET,
			0,
			NX_CDRP_CMD_DESTROY_RX_CTX)) {

		printk(KERN_WARNING
			"%s: Failed to destroy rx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}
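
/*
 * Create the transmit context: the request carries the DMA addresses
 * of the command (tx) descriptor ring, the dummy-DMA buffer and the
 * command-consumer index, and the firmware's response yields the CRB
 * offset of the command producer register and the tx context id.
 */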
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
	nx_hostrq_tx_ctx_t	*prq;
	nx_hostrq_cds_ring_t	*prq_cds;
	nx_cardrsp_tx_ctx_t	*prsp;
	void	*rq_addr, *rsp_addr;
	size_t	rq_size, rsp_size;
	u32	temp;
	int	err = 0;
	u64	offset, phys_addr;
	dma_addr_t	rq_phys_addr, rsp_phys_addr;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
	rq_addr = pci_alloc_consistent(adapter->pdev,
		rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
		rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = (nx_hostrq_tx_ctx_t *)rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;

	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

	offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
	prq->cmd_cons_dma_addr = cpu_to_le64(offset);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	err = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			(u32)(phys_addr >> 32),
			((u32)phys_addr & 0xffffffff),
			rq_size,
			NX_CDRP_CMD_CREATE_TX_CTX);

	if (err == NX_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
#if 0
		adapter->tx_state =
			le32_to_cpu(prsp->host_ctx_state);
#endif
		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		printk(KERN_WARNING
			"Failed to create tx ctx in firmware %d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
	if (netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			adapter->tx_context_id,
			NX_DESTROY_CTX_RESET,
			0,
			NX_CDRP_CMD_DESTROY_TX_CTX)) {

		printk(KERN_WARNING
			"%s: Failed to destroy tx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}
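
/*
 * Legacy context setup (firmware older than 4.0): per-port CRB
 * registers that receive the low/high halves of the ring-context DMA
 * address and a signature word.
 */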
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};
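
/*
 * Legacy path: publish the shared netxen_ring_ctx structure (command
 * ring, receive rings and status rings) to firmware older than 4.0 by
 * writing its DMA address and a version signature into the per-port
 * context CRB registers defined above.
 */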
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int port = adapter->portnum;
	struct netxen_ring_ctx *hwctx;
	u32 signature;

	tx_ring = adapter->tx_ring;
	recv_ctx = &adapter->recv_ctx;
	hwctx = recv_ctx->hwctx;

	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		hwctx->rcv_rings[ring].addr =
			cpu_to_le64(rds_ring->phys_addr);
		hwctx->rcv_rings[ring].size =
			cpu_to_le32(rds_ring->num_desc);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == 0) {
			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
		}
		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
	}
	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

	signature = (adapter->max_sds_rings > 1) ?
		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
			lower32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
			upper32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
			signature | port);
	return 0;
}
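
/*
 * Allocate all DMA-coherent hardware state for one port: the shared
 * ring-context block (plus a trailing u32 used as the tx hw consumer
 * index), the tx descriptor ring, and every rds/sds descriptor ring.
 * Firmware 4.0 and newer gets its contexts created via CDRP commands;
 * older firmware is programmed through netxen_init_old_ctx().
 */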
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int port = adapter->portnum;

	recv_ctx = &adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	addr = pci_alloc_consistent(pdev,
			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
			&recv_ctx->phys_addr);
	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate hw context\n");
		return -ENOMEM;
	}

	memset(addr, 0, sizeof(struct netxen_ring_ctx));
	recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
	recv_ctx->hwctx->cmd_consumer_offset =
		cpu_to_le64(recv_ctx->phys_addr +
			sizeof(struct netxen_ring_ctx));
	tx_ring->hw_consumer =
		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

	/* cmd desc ring */
	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
				netdev->name);
		return -ENOMEM;
	}

	tx_ring->desc_head = (struct cmd_desc_type0 *)addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = pci_alloc_consistent(adapter->pdev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate rds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = (struct rcv_desc *)addr;

		if (adapter->fw_major < 4)
			rds_ring->crb_rcv_producer =
				recv_crb_registers[port].crb_rcv_producer[ring];
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = pci_alloc_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate sds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = (struct status_desc *)addr;

		sds_ring->crb_sts_consumer =
			recv_crb_registers[port].crb_sts_consumer[ring];

		sds_ring->crb_intr_mask =
			recv_crb_registers[port].sw_int_mask[ring];
	}

	if (adapter->fw_major >= 4) {
		err = nx_fw_cmd_create_rx_ctx(adapter);
		if (err)
			goto err_out_free;
		err = nx_fw_cmd_create_tx_ctx(adapter);
		if (err)
			goto err_out_free;
	} else {
		err = netxen_init_old_ctx(adapter);
		if (err)
			goto err_out_free;
	}

	return 0;

err_out_free:
	netxen_free_hw_resources(adapter);
	return err;
}
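
/*
 * Tear down what netxen_alloc_hw_resources() set up: destroy the
 * firmware contexts (CDRP commands for fw >= 4.0, a D3 reset signature
 * write otherwise), let in-flight DMA drain, then free the context
 * block and all descriptor rings.
 */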
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	int port = adapter->portnum;

	if (adapter->fw_major >= 4) {
		nx_fw_cmd_destroy_rx_ctx(adapter);
		nx_fw_cmd_destroy_tx_ctx(adapter);
	} else {
		netxen_api_lock(adapter);
		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
				NETXEN_CTX_D3_RESET | port);
		netxen_api_unlock(adapter);
	}

	/* Allow dma queues to drain after context reset */
	msleep(20);

	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->hwctx != NULL) {
		pci_free_consistent(adapter->pdev,
				sizeof(struct netxen_ring_ctx) +
				sizeof(uint32_t),
				recv_ctx->hwctx,
				recv_ctx->phys_addr);
		recv_ctx->hwctx = NULL;
	}

	tx_ring = adapter->tx_ring;
	if (tx_ring->desc_head != NULL) {
		pci_free_consistent(adapter->pdev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}