netxen_nic_ctx.c

/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include "netxen_nic_hw.h"
#include "netxen_nic.h"

#define NXHAL_VERSION	1

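/*
 * Firmware command (CDRP) helpers.
 *
 * A command is issued by writing a signature, up to three arguments and
 * the command code into CRB registers, then polling NX_CDRP_CRB_OFFSET
 * until the firmware posts a response. netxen_poll_rsp() below does the
 * polling, sleeping 1ms per iteration and giving up after
 * NX_OS_CRB_RETRY_COUNT attempts.
 */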
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
	u32 rsp = NX_CDRP_RSP_OK;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		msleep(1);

		if (++timeout > NX_OS_CRB_RETRY_COUNT)
			return NX_CDRP_RSP_TIMEOUT;

		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
	} while (!NX_CDRP_IS_RSP(rsp));

	return rsp;
}

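/*
 * Issue a single CDRP command and return the firmware result code.
 * The netxen_api_lock() semaphore serializes CRB access; on
 * NX_CDRP_RSP_FAIL the detailed error code is read back from
 * NX_ARG1_CRB_OFFSET.
 */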
static u32
netxen_issue_cmd(struct netxen_adapter *adapter,
	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);

	/* Acquire semaphore before accessing CRB */
	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
	NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
	NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
	NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);

		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	}

	/* Release semaphore */
	netxen_api_unlock(adapter);

	return rcode;
}

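/*
 * Tell the firmware about an MTU change. Only meaningful once the
 * receive context is active; otherwise the command is skipped and
 * success is returned.
 */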
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
	u32 rcode = NX_RCODE_SUCCESS;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
		rcode = netxen_issue_cmd(adapter,
				adapter->ahw.pci_func,
				NXHAL_VERSION,
				recv_ctx->context_id,
				mtu,
				0,
				NX_CDRP_CMD_SET_MTU);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}

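/*
 * Build a host request (hostrq) describing the RDS (receive descriptor)
 * and SDS (status descriptor) rings in a DMA-coherent buffer, hand its
 * physical address to the firmware via NX_CDRP_CMD_CREATE_RX_CTX, and
 * pick up the per-ring producer/consumer CRB offsets from the card
 * response (cardrsp) buffer. The "reg - 0x200" adjustment below appears
 * to translate the firmware-reported CRB offsets into NETXEN_NIC_REG()
 * window offsets.
 */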
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
	void *addr;
	nx_hostrq_rx_ctx_t *prq;
	nx_cardrsp_rx_ctx_t *prsp;
	nx_hostrq_rds_ring_t *prq_rds;
	nx_hostrq_sds_ring_t *prq_sds;
	nx_cardrsp_rds_ring_t *prsp_rds;
	nx_cardrsp_sds_ring_t *prsp_sds;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;

	int err;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
			rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = (nx_hostrq_rx_ctx_t *)addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = (nx_cardrsp_rx_ctx_t *)addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			(u32)(phys_addr >> 32),
			(u32)(phys_addr & 0xffffffff),
			rq_size,
			NX_CDRP_CMD_CREATE_RX_CTX);
	if (err) {
		printk(KERN_WARNING
			"Failed to create rx ctx in firmware: %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((nx_cardrsp_rds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	prsp_sds = ((nx_cardrsp_sds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}

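/*
 * Ask the firmware to tear down any stale rx/tx contexts for this PCI
 * function. Called from netxen_alloc_hw_resources() when reset_devices
 * is set, before fresh contexts are created.
 */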
static void
nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
{
	netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
			adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
			NX_CDRP_CMD_DESTROY_RX_CTX);

	netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
			adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
			NX_CDRP_CMD_DESTROY_TX_CTX);
}

static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	if (netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			recv_ctx->context_id,
			NX_DESTROY_CTX_RESET,
			0,
			NX_CDRP_CMD_DESTROY_RX_CTX)) {

		printk(KERN_WARNING
			"%s: Failed to destroy rx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}

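/*
 * Create the firmware tx context. The host request carries the cmd (tx)
 * descriptor ring address, the dummy DMA buffer address and the address
 * at which the firmware should post the tx consumer index
 * (cmd_cons_dma_addr, placed just after the legacy ring context).
 */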
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
	nx_hostrq_tx_ctx_t *prq;
	nx_hostrq_cds_ring_t *prq_cds;
	nx_cardrsp_tx_ctx_t *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	int err = 0;
	u64 offset, phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
	rq_addr = pci_alloc_consistent(adapter->pdev,
			rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = (nx_hostrq_tx_ctx_t *)rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;

	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

	offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
	prq->cmd_cons_dma_addr = cpu_to_le64(offset);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	err = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			(u32)(phys_addr >> 32),
			((u32)phys_addr & 0xffffffff),
			rq_size,
			NX_CDRP_CMD_CREATE_TX_CTX);

	if (err == NX_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(temp - 0x200));
#if 0
		adapter->tx_state =
			le32_to_cpu(prsp->host_ctx_state);
#endif
		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		printk(KERN_WARNING
			"Failed to create tx ctx in firmware: %d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
	if (netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			adapter->tx_context_id,
			NX_DESTROY_CTX_RESET,
			0,
			NX_CDRP_CMD_DESTROY_TX_CTX)) {

		printk(KERN_WARNING
			"%s: Failed to destroy tx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}

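/*
 * PHY register access routed through the firmware. Note that the read
 * variant never writes through its *val argument; the register value is
 * returned directly from NX_ARG1_CRB_OFFSET on success.
 */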
int
nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
{
	u32 rcode;

	rcode = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			reg,
			0,
			0,
			NX_CDRP_CMD_READ_PHY);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return NXRD32(adapter, NX_ARG1_CRB_OFFSET);
}

int
nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
{
	u32 rcode;

	rcode = netxen_issue_cmd(adapter,
			adapter->ahw.pci_func,
			NXHAL_VERSION,
			reg,
			val,
			0,
			NX_CDRP_CMD_WRITE_PHY);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}

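/*
 * Legacy (P2) context setup. Each PCI function gets a trio of CRB
 * registers: the low/high halves of the ring-context DMA address and a
 * signature register used to hand the context over to the firmware.
 */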
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))

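/*
 * Per-port CRB registers used by P2 hardware for the receive producer,
 * status consumer and software interrupt mask. Instances 2 and 3 appear
 * to fall back to the 0x03c register as a placeholder for the extra
 * ring slots.
 */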
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};

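/*
 * Program the legacy ring context directly: fill in struct
 * netxen_ring_ctx in host memory, then write its DMA address and a
 * signature (OR'd with the port number) into the per-port CRB
 * registers, presumably so the firmware can fetch the context.
 */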
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int port = adapter->portnum;
	struct netxen_ring_ctx *hwctx;
	u32 signature;

	tx_ring = adapter->tx_ring;
	recv_ctx = &adapter->recv_ctx;
	hwctx = recv_ctx->hwctx;

	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		hwctx->rcv_rings[ring].addr =
			cpu_to_le64(rds_ring->phys_addr);
		hwctx->rcv_rings[ring].size =
			cpu_to_le32(rds_ring->num_desc);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == 0) {
			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
		}
		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
	}
	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

	signature = (adapter->max_sds_rings > 1) ?
		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
			lower32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
			upper32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
			signature | port);
	return 0;
}

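/*
 * Allocate all DMA-coherent ring memory (ring context plus tx consumer
 * word, tx descriptor ring, rds and sds rings) and then bring up the
 * hardware context: via firmware commands on P3, or via
 * netxen_init_old_ctx() on P2. Everything is unwound through
 * netxen_free_hw_resources() on failure.
 */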
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int port = adapter->portnum;

	recv_ctx = &adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	addr = pci_alloc_consistent(pdev,
			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
			&recv_ctx->phys_addr);
	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate hw context\n");
		return -ENOMEM;
	}

	memset(addr, 0, sizeof(struct netxen_ring_ctx));
	recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
	recv_ctx->hwctx->cmd_consumer_offset =
		cpu_to_le64(recv_ctx->phys_addr +
			sizeof(struct netxen_ring_ctx));
	tx_ring->hw_consumer =
		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

	/* cmd desc ring */
	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
				netdev->name);
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = (struct cmd_desc_type0 *)addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = pci_alloc_consistent(adapter->pdev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate rds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = (struct rcv_desc *)addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			rds_ring->crb_rcv_producer =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].crb_rcv_producer[ring]);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = pci_alloc_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate sds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = (struct status_desc *)addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			sds_ring->crb_sts_consumer =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].crb_sts_consumer[ring]);

			sds_ring->crb_intr_mask =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].sw_int_mask[ring]);
		}
	}

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;
		if (reset_devices)
			nx_fw_cmd_reset_ctx(adapter);
		err = nx_fw_cmd_create_rx_ctx(adapter);
		if (err)
			goto err_out_free;
		err = nx_fw_cmd_create_tx_ctx(adapter);
		if (err)
			goto err_out_free;
	} else {
		err = netxen_init_old_ctx(adapter);
		if (err)
			goto err_out_free;
	}

done:
	return 0;

err_out_free:
	netxen_free_hw_resources(adapter);
	return err;
}

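/*
 * Mirror of netxen_alloc_hw_resources(): destroy the firmware contexts
 * (P3) or write the D3 reset signature (P2), wait briefly for DMA
 * queues to drain, then free all ring memory.
 */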
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	int port = adapter->portnum;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;

		nx_fw_cmd_destroy_rx_ctx(adapter);
		nx_fw_cmd_destroy_tx_ctx(adapter);
	} else {
		netxen_api_lock(adapter);
		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
				NETXEN_CTX_D3_RESET | port);
		netxen_api_unlock(adapter);
	}

	/* Allow dma queues to drain after context reset */
	msleep(20);

done:
	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->hwctx != NULL) {
		pci_free_consistent(adapter->pdev,
				sizeof(struct netxen_ring_ctx) +
				sizeof(uint32_t),
				recv_ctx->hwctx,
				recv_ctx->phys_addr);
		recv_ctx->hwctx = NULL;
	}

	tx_ring = adapter->tx_ring;
	if (tx_ring->desc_head != NULL) {
		pci_free_consistent(adapter->pdev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}