netxen_nic_ctx.c

/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include "netxen_nic_hw.h"
#include "netxen_nic.h"

#define NXHAL_VERSION	1
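
/*
 * netxen_poll_rsp() - wait for the firmware to post a CDRP response.
 *
 * Polls the CDRP CRB register roughly once per millisecond until the
 * value read back is a response code, or until NX_OS_CRB_RETRY_COUNT
 * polls have elapsed, in which case NX_CDRP_RSP_TIMEOUT is returned.
 */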
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
	u32 rsp = NX_CDRP_RSP_OK;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		msleep(1);

		if (++timeout > NX_OS_CRB_RETRY_COUNT)
			return NX_CDRP_RSP_TIMEOUT;

		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
	} while (!NX_CDRP_IS_RSP(rsp));

	return rsp;
}
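
/*
 * netxen_issue_cmd() - issue a single CDRP command to the firmware.
 *
 * The sequence is: take the API lock, write the signature and the three
 * request arguments into the CRB, write the command code, then poll for
 * the firmware's response.  On NX_CDRP_RSP_OK, any cmd->rsp.argN field
 * the caller preset to a non-zero value is overwritten with the value
 * read back from the corresponding CRB argument register.  Returns a
 * NX_RCODE_* status.
 */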
static u32
netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
						NXHAL_VERSION);

	/* Acquire semaphore before accessing CRB */
	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
	NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
	NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
	NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);

		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	} else if (rsp == NX_CDRP_RSP_OK) {
		if (cmd->rsp.arg2)
			cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
		if (cmd->rsp.arg3)
			cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
	}

	if (cmd->rsp.arg1)
		cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

	/* Release semaphore */
	netxen_api_unlock(adapter);

	return rcode;
}
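
/*
 * nx_fw_cmd_set_mtu() - tell the firmware the new MTU for this receive
 * context.  Only issued while the receive context is active; returns
 * -EIO if the firmware rejects the command.
 */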
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
	u32 rcode = NX_RCODE_SUCCESS;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = mtu;
	cmd.req.arg3 = 0;
	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
		rcode = netxen_issue_cmd(adapter, &cmd);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}
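
/*
 * nx_fw_cmd_set_gbe_port() - request speed/duplex/autoneg settings for a
 * GbE port from the firmware.  The three values are passed straight
 * through as CDRP arguments.
 */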
int
nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
			u32 speed, u32 duplex, u32 autoneg)
{
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
	cmd.req.arg1 = speed;
	cmd.req.arg2 = duplex;
	cmd.req.arg3 = autoneg;
	return netxen_issue_cmd(adapter, &cmd);
}
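
/*
 * nx_fw_cmd_create_rx_ctx() - create the receive context in firmware.
 *
 * Builds a host request (nx_hostrq_rx_ctx_t) in DMA-coherent memory
 * describing every receive (RDS) ring and status (SDS) ring, hands its
 * bus address to the firmware via NX_CDRP_CMD_CREATE_RX_CTX, and then
 * reads the card's response to learn the CRB offsets of each ring's
 * producer/consumer and interrupt-mask registers, as well as the
 * context id assigned by the firmware.
 */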
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
	void *addr;
	nx_hostrq_rx_ctx_t *prq;
	nx_cardrsp_rx_ctx_t *prsp;
	nx_hostrq_rds_ring_t *prq_rds;
	nx_hostrq_sds_ring_t *prq_sds;
	nx_cardrsp_rds_ring_t *prsp_rds;
	nx_cardrsp_sds_ring_t *prsp_sds;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;

	int err;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
				rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);
	if (err) {
		printk(KERN_WARNING
			"Failed to create rx ctx in firmware %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((nx_cardrsp_rds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	prsp_sds = ((nx_cardrsp_sds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}
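
/*
 * nx_fw_cmd_destroy_rx_ctx() - ask the firmware to tear down the receive
 * context identified by recv_ctx->context_id.  Failure is only logged.
 */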
static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;

	if (netxen_issue_cmd(adapter, &cmd)) {
		printk(KERN_WARNING
			"%s: Failed to destroy rx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}
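
/*
 * nx_fw_cmd_create_tx_ctx() - create the transmit context in firmware.
 *
 * Mirrors nx_fw_cmd_create_rx_ctx(): a host request describing the
 * single command (CDS) ring is placed in DMA-coherent memory and passed
 * to the firmware with NX_CDRP_CMD_CREATE_TX_CTX.  On success the CRB
 * offset of the command-ring producer register and the firmware-assigned
 * tx context id are saved in the adapter.
 */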
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
	nx_hostrq_tx_ctx_t *prq;
	nx_hostrq_cds_ring_t *prq_cds;
	nx_cardrsp_tx_ctx_t *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	int err = 0;
	u64 offset, phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
	rq_addr = pci_alloc_consistent(adapter->pdev,
		rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
		rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;

	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

	offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
	prq->cmd_cons_dma_addr = cpu_to_le64(offset);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);

	if (err == NX_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(temp - 0x200));
#if 0
		adapter->tx_state =
			le32_to_cpu(prsp->host_ctx_state);
#endif
		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		printk(KERN_WARNING
			"Failed to create tx ctx in firmware %d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
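
/*
 * nx_fw_cmd_destroy_tx_ctx() - ask the firmware to tear down the transmit
 * context identified by adapter->tx_context_id.  Failure is only logged.
 */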
static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = adapter->tx_context_id;
	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;

	if (netxen_issue_cmd(adapter, &cmd)) {
		printk(KERN_WARNING
			"%s: Failed to destroy tx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}
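
/*
 * nx_fw_cmd_query_phy() - read a PHY register through the firmware.
 * Presetting cmd.rsp.arg1 to 1 tells netxen_issue_cmd() to read the
 * result back from the ARG1 CRB register; that value (or -EIO on
 * failure) is returned to the caller.  Note that the val parameter is
 * not written by this implementation.
 */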
int
nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
{
	u32 rcode;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = reg;
	cmd.req.arg2 = 0;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
	cmd.rsp.arg1 = 1;
	rcode = netxen_issue_cmd(adapter, &cmd);
	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return cmd.rsp.arg1;
}
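
/*
 * nx_fw_cmd_set_phy() - write a PHY register through the firmware.
 * Returns 0 on success, -EIO otherwise.
 */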
int
nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
{
	u32 rcode;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = reg;
	cmd.req.arg2 = val;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
	rcode = netxen_issue_cmd(adapter, &cmd);
	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}
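
/*
 * Legacy (P2 revision) context setup: per-PCI-function CRB registers
 * used to hand the host context's DMA address and a signature to the
 * firmware.  Each row holds the {addr_lo, signature, addr_hi} registers
 * for one function, as selected by the CRB_CTX_* macros below.
 */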
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
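
/*
 * Per-port CRB register map used by the legacy context: receive-ring
 * producer registers (normal, jumbo, LRO), status-ring consumer
 * registers and software interrupt-mask registers for each of the four
 * instances, indexed by adapter->portnum.
 */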
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};
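
/*
 * netxen_init_old_ctx() - legacy (P2 revision) context setup.
 *
 * Fills in the shared netxen_ring_ctx structure with the DMA addresses
 * and sizes of the tx, receive and status rings, then writes the
 * context's physical address and a signature (V2 when more than one
 * status ring is used) into the per-function CRB registers so the
 * firmware can pick it up.
 */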
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int port = adapter->portnum;
	struct netxen_ring_ctx *hwctx;
	u32 signature;

	tx_ring = adapter->tx_ring;
	recv_ctx = &adapter->recv_ctx;
	hwctx = recv_ctx->hwctx;

	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		hwctx->rcv_rings[ring].addr =
			cpu_to_le64(rds_ring->phys_addr);
		hwctx->rcv_rings[ring].size =
			cpu_to_le32(rds_ring->num_desc);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == 0) {
			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
		}
		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
	}
	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

	signature = (adapter->max_sds_rings > 1) ?
		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
			lower32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
			upper32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
			signature | port);
	return 0;
}
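
/*
 * netxen_alloc_hw_resources() - allocate all DMA-coherent descriptor
 * memory for one adapter: the shared ring context plus its hw consumer
 * word, the tx (command) descriptor ring, and every receive and status
 * descriptor ring.  On P2 revision hardware the legacy context is
 * programmed directly; on newer hardware the rx and tx contexts are
 * created through firmware commands (once per firmware attach).  Any
 * failure unwinds through netxen_free_hw_resources().
 */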
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int port = adapter->portnum;

	recv_ctx = &adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	addr = pci_alloc_consistent(pdev,
			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
			&recv_ctx->phys_addr);
	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate hw context\n");
		return -ENOMEM;
	}

	memset(addr, 0, sizeof(struct netxen_ring_ctx));
	recv_ctx->hwctx = addr;
	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
	recv_ctx->hwctx->cmd_consumer_offset =
		cpu_to_le64(recv_ctx->phys_addr +
			sizeof(struct netxen_ring_ctx));
	tx_ring->hw_consumer =
		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

	/* cmd desc ring */
	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
				netdev->name);
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = pci_alloc_consistent(adapter->pdev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate rds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			rds_ring->crb_rcv_producer =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].crb_rcv_producer[ring]);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = pci_alloc_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate sds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			sds_ring->crb_sts_consumer =
				netxen_get_ioaddr(adapter,
				recv_crb_registers[port].crb_sts_consumer[ring]);

			sds_ring->crb_intr_mask =
				netxen_get_ioaddr(adapter,
				recv_crb_registers[port].sw_int_mask[ring]);
		}
	}

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;
		err = nx_fw_cmd_create_rx_ctx(adapter);
		if (err)
			goto err_out_free;
		err = nx_fw_cmd_create_tx_ctx(adapter);
		if (err)
			goto err_out_free;
	} else {
		err = netxen_init_old_ctx(adapter);
		if (err)
			goto err_out_free;
	}

done:
	return 0;

err_out_free:
	netxen_free_hw_resources(adapter);
	return err;
}
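
/*
 * netxen_free_hw_resources() - undo netxen_alloc_hw_resources().
 * Destroys the firmware contexts (or resets the legacy context on P2
 * hardware), waits briefly for DMA to drain, then frees the shared ring
 * context and every descriptor ring that was allocated.
 */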
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	int port = adapter->portnum;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;

		nx_fw_cmd_destroy_rx_ctx(adapter);
		nx_fw_cmd_destroy_tx_ctx(adapter);
	} else {
		netxen_api_lock(adapter);
		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
				NETXEN_CTX_D3_RESET | port);
		netxen_api_unlock(adapter);
	}

	/* Allow dma queues to drain after context reset */
	msleep(20);

done:
	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->hwctx != NULL) {
		pci_free_consistent(adapter->pdev,
				sizeof(struct netxen_ring_ctx) +
				sizeof(uint32_t),
				recv_ctx->hwctx,
				recv_ctx->phys_addr);
		recv_ctx->hwctx = NULL;
	}

	tx_ring = adapter->tx_ring;
	if (tx_ring->desc_head != NULL) {
		pci_free_consistent(adapter->pdev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}