netxen_nic_ctx.c

/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include "netxen_nic_hw.h"
#include "netxen_nic.h"

#define NXHAL_VERSION	1
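
/*
 * Poll the CDRP response register until the firmware posts a response
 * code, sleeping 1 ms between reads; give up and report a timeout after
 * NX_OS_CRB_RETRY_COUNT attempts.
 */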
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
        u32 rsp = NX_CDRP_RSP_OK;
        int timeout = 0;

        do {
                /* give at least 1ms for firmware to respond */
                msleep(1);

                if (++timeout > NX_OS_CRB_RETRY_COUNT)
                        return NX_CDRP_RSP_TIMEOUT;

                rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
        } while (!NX_CDRP_IS_RSP(rsp));

        return rsp;
}
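
/*
 * Issue one CDRP command to the firmware: take the API lock, write the
 * signature and the three request arguments into the CRB argument
 * registers, kick the command register, poll for the response and read
 * back whichever response arguments the caller pre-set to non-zero.
 * Returns an NX_RCODE_* value.
 */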
static u32
netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
{
        u32 rsp;
        u32 signature = 0;
        u32 rcode = NX_RCODE_SUCCESS;

        signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
                        NXHAL_VERSION);

        /* Acquire semaphore before accessing CRB */
        if (netxen_api_lock(adapter))
                return NX_RCODE_TIMEOUT;

        NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
        NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
        NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
        NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
        NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));

        rsp = netxen_poll_rsp(adapter);

        if (rsp == NX_CDRP_RSP_TIMEOUT) {
                printk(KERN_ERR "%s: card response timeout.\n",
                                netxen_nic_driver_name);
                rcode = NX_RCODE_TIMEOUT;
        } else if (rsp == NX_CDRP_RSP_FAIL) {
                rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
                printk(KERN_ERR "%s: failed card response code:0x%x\n",
                                netxen_nic_driver_name, rcode);
        } else if (rsp == NX_CDRP_RSP_OK) {
                cmd->rsp.cmd = NX_RCODE_SUCCESS;
                if (cmd->rsp.arg2)
                        cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
                if (cmd->rsp.arg3)
                        cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
        }

        if (cmd->rsp.arg1)
                cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

        /* Release semaphore */
        netxen_api_unlock(adapter);

        return rcode;
}
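
/*
 * Query the firmware for the minidump template size and version. The
 * response fields are pre-set to non-zero so that netxen_issue_cmd()
 * reads back arg2 (size) and arg3 (version) from the CRB registers.
 */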
static int
netxen_get_minidump_template_size(struct netxen_adapter *adapter)
{
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
        memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
        netxen_issue_cmd(adapter, &cmd);
        if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
                dev_info(&adapter->pdev->dev,
                        "Can't get template size %d\n", cmd.rsp.cmd);
                return -EIO;
        }
        adapter->mdump.md_template_size = cmd.rsp.arg2;
        adapter->mdump.md_template_ver = cmd.rsp.arg3;
        return 0;
}
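
/*
 * Fetch the minidump template: hand the firmware a zeroed DMA-coherent
 * buffer to fill, and on success copy the result into
 * adapter->mdump.md_template.
 */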
static int
netxen_get_minidump_template(struct netxen_adapter *adapter)
{
        dma_addr_t md_template_addr;
        void *addr;
        u32 size;
        struct netxen_cmd_args cmd;

        size = adapter->mdump.md_template_size;

        if (size == 0) {
                dev_err(&adapter->pdev->dev, "Can not capture Minidump "
                        "template. Invalid template size.\n");
                return NX_RCODE_INVALID_ARGS;
        }

        addr = pci_alloc_consistent(adapter->pdev, size, &md_template_addr);
        if (!addr) {
                dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
                return -ENOMEM;
        }

        memset(addr, 0, size);
        memset(&cmd, 0, sizeof(cmd));
        memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
        cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
        cmd.req.arg1 = LSD(md_template_addr);
        cmd.req.arg2 = MSD(md_template_addr);
        cmd.req.arg3 |= size;
        netxen_issue_cmd(adapter, &cmd);

        if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
                memcpy(adapter->mdump.md_template, addr, size);
        } else {
                dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
                        "err_code : %d, requested_size : %d, actual_size : %d\n",
                        cmd.rsp.cmd, size, cmd.rsp.arg2);
        }
        pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
        return 0;
}
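
/*
 * Fold a 64-bit sum of the template words down to 32 bits with
 * end-around carry; a return value of zero means the checksum is valid.
 */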
static u32
netxen_check_template_checksum(struct netxen_adapter *adapter)
{
        u64 sum = 0;
        u32 *buff = adapter->mdump.md_template;
        int count = adapter->mdump.md_template_size / sizeof(uint32_t);

        while (count-- > 0)
                sum += *buff++;

        while (sum >> 32)
                sum = (sum & 0xFFFFFFFF) + (sum >> 32);

        return ~sum;
}
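
/*
 * Top-level minidump setup: query the template size, allocate a buffer,
 * fetch the template, verify its checksum and convert it from
 * little-endian to host byte order before enabling minidump support.
 */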
int
netxen_setup_minidump(struct netxen_adapter *adapter)
{
        int err = 0, i;
        u32 *template, *tmp_buf;
        struct netxen_minidump_template_hdr *hdr;

        err = netxen_get_minidump_template_size(adapter);
        if (err) {
                adapter->mdump.fw_supports_md = 0;
                if ((err == NX_RCODE_CMD_INVALID) ||
                                (err == NX_RCODE_CMD_NOT_IMPL)) {
                        dev_info(&adapter->pdev->dev,
                                "Flashed firmware version does not support minidump, "
                                "minimum version required is [ %u.%u.%u ].\n",
                                NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
                                NX_MD_SUPPORT_SUBVERSION);
                }
                return err;
        }

        if (!adapter->mdump.md_template_size) {
                dev_err(&adapter->pdev->dev, "Error: Invalid template size, "
                        "should be non-zero.\n");
                return -EIO;
        }

        adapter->mdump.md_template =
                kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
        if (!adapter->mdump.md_template) {
                dev_err(&adapter->pdev->dev, "Unable to allocate memory "
                        "for minidump template.\n");
                return -ENOMEM;
        }

        err = netxen_get_minidump_template(adapter);
        if (err) {
                if (err == NX_RCODE_CMD_NOT_IMPL)
                        adapter->mdump.fw_supports_md = 0;
                goto free_template;
        }

        if (netxen_check_template_checksum(adapter)) {
                dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
                err = -EIO;
                goto free_template;
        }

        adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
        tmp_buf = (u32 *) adapter->mdump.md_template;
        template = (u32 *) adapter->mdump.md_template;
        for (i = 0; i < adapter->mdump.md_template_size / sizeof(u32); i++)
                *template++ = __le32_to_cpu(*tmp_buf++);
        hdr = (struct netxen_minidump_template_hdr *)
                        adapter->mdump.md_template;
        adapter->mdump.md_capture_buff = NULL;
        adapter->mdump.fw_supports_md = 1;
        adapter->mdump.md_enabled = 1;

        return err;

free_template:
        kfree(adapter->mdump.md_template);
        adapter->mdump.md_template = NULL;
        return err;
}
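
/* Program a new MTU into the firmware for the active receive context. */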
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
        u32 rcode = NX_RCODE_SUCCESS;
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
        cmd.req.arg1 = recv_ctx->context_id;
        cmd.req.arg2 = mtu;
        cmd.req.arg3 = 0;

        if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
                rcode = netxen_issue_cmd(adapter, &cmd);

        if (rcode != NX_RCODE_SUCCESS)
                return -EIO;

        return 0;
}

int
nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
                        u32 speed, u32 duplex, u32 autoneg)
{
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
        cmd.req.arg1 = speed;
        cmd.req.arg2 = duplex;
        cmd.req.arg3 = autoneg;
        return netxen_issue_cmd(adapter, &cmd);
}
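
/*
 * Build a host request describing every RDS (receive) and SDS (status)
 * ring in a DMA buffer, pass its address to the firmware with
 * NX_CDRP_CMD_CREATE_RX_CTX, then map the producer, consumer and
 * interrupt-mask CRB registers returned in the card response.
 */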
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
        void *addr;
        nx_hostrq_rx_ctx_t *prq;
        nx_cardrsp_rx_ctx_t *prsp;
        nx_hostrq_rds_ring_t *prq_rds;
        nx_hostrq_sds_ring_t *prq_sds;
        nx_cardrsp_rds_ring_t *prsp_rds;
        nx_cardrsp_sds_ring_t *prsp_sds;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct netxen_cmd_args cmd;

        dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
        u64 phys_addr;

        int i, nrds_rings, nsds_rings;
        size_t rq_size, rsp_size;
        u32 cap, reg, val;

        int err;

        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

        nrds_rings = adapter->max_rds_rings;
        nsds_rings = adapter->max_sds_rings;

        rq_size =
                SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
        rsp_size =
                SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

        addr = pci_alloc_consistent(adapter->pdev,
                        rq_size, &hostrq_phys_addr);
        if (addr == NULL)
                return -ENOMEM;
        prq = addr;

        addr = pci_alloc_consistent(adapter->pdev,
                        rsp_size, &cardrsp_phys_addr);
        if (addr == NULL) {
                err = -ENOMEM;
                goto out_free_rq;
        }
        prsp = addr;

        prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

        cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
        cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

        prq->capabilities[0] = cpu_to_le32(cap);
        prq->host_int_crb_mode =
                cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
        prq->host_rds_crb_mode =
                cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

        prq->num_rds_rings = cpu_to_le16(nrds_rings);
        prq->num_sds_rings = cpu_to_le16(nsds_rings);
        prq->rds_ring_offset = cpu_to_le32(0);

        val = le32_to_cpu(prq->rds_ring_offset) +
                (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
        prq->sds_ring_offset = cpu_to_le32(val);

        prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
                        le32_to_cpu(prq->rds_ring_offset));

        for (i = 0; i < nrds_rings; i++) {
                rds_ring = &recv_ctx->rds_rings[i];

                prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
                prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
                prq_rds[i].ring_kind = cpu_to_le32(i);
                prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
        }

        prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
                        le32_to_cpu(prq->sds_ring_offset));

        for (i = 0; i < nsds_rings; i++) {
                sds_ring = &recv_ctx->sds_rings[i];

                prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
                prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
                prq_sds[i].msi_index = cpu_to_le16(i);
        }

        phys_addr = hostrq_phys_addr;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = (u32)(phys_addr >> 32);
        cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
        cmd.req.arg3 = rq_size;
        cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
        err = netxen_issue_cmd(adapter, &cmd);
        if (err) {
                printk(KERN_WARNING
                        "Failed to create rx ctx in firmware %d\n", err);
                goto out_free_rsp;
        }

        prsp_rds = ((nx_cardrsp_rds_ring_t *)
                        &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

        for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
                rds_ring = &recv_ctx->rds_rings[i];

                reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
                rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(reg - 0x200));
        }

        prsp_sds = ((nx_cardrsp_sds_ring_t *)
                        &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

        for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
                sds_ring = &recv_ctx->sds_rings[i];

                reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
                sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(reg - 0x200));

                reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
                sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(reg - 0x200));
        }

        recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
        recv_ctx->context_id = le16_to_cpu(prsp->context_id);
        recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
        pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
        pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
        return err;
}

static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = recv_ctx->context_id;
        cmd.req.arg2 = NX_DESTROY_CTX_RESET;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;

        if (netxen_issue_cmd(adapter, &cmd)) {
                printk(KERN_WARNING
                        "%s: Failed to destroy rx ctx in firmware\n",
                        netxen_nic_driver_name);
        }
}
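
/*
 * Transmit-side counterpart of nx_fw_cmd_create_rx_ctx(): describe the
 * single cmd descriptor ring to the firmware and record the producer CRB
 * register and tx context id from its response.
 */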
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
        nx_hostrq_tx_ctx_t *prq;
        nx_hostrq_cds_ring_t *prq_cds;
        nx_cardrsp_tx_ctx_t *prsp;
        void *rq_addr, *rsp_addr;
        size_t rq_size, rsp_size;
        u32 temp;
        int err = 0;
        u64 offset, phys_addr;
        dma_addr_t rq_phys_addr, rsp_phys_addr;
        struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        struct netxen_cmd_args cmd;

        rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
        rq_addr = pci_alloc_consistent(adapter->pdev,
                        rq_size, &rq_phys_addr);
        if (!rq_addr)
                return -ENOMEM;

        rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
        rsp_addr = pci_alloc_consistent(adapter->pdev,
                        rsp_size, &rsp_phys_addr);
        if (!rsp_addr) {
                err = -ENOMEM;
                goto out_free_rq;
        }

        memset(rq_addr, 0, rq_size);
        prq = rq_addr;

        memset(rsp_addr, 0, rsp_size);
        prsp = rsp_addr;

        prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

        temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
        prq->capabilities[0] = cpu_to_le32(temp);

        prq->host_int_crb_mode =
                cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

        prq->interrupt_ctl = 0;
        prq->msi_index = 0;

        prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

        offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
        prq->cmd_cons_dma_addr = cpu_to_le64(offset);

        prq_cds = &prq->cds_ring;

        prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
        prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

        phys_addr = rq_phys_addr;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = (u32)(phys_addr >> 32);
        cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
        cmd.req.arg3 = rq_size;
        cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
        err = netxen_issue_cmd(adapter, &cmd);

        if (err == NX_RCODE_SUCCESS) {
                temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
                tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
                                NETXEN_NIC_REG(temp - 0x200));
#if 0
                adapter->tx_state =
                        le32_to_cpu(prsp->host_ctx_state);
#endif
                adapter->tx_context_id =
                        le16_to_cpu(prsp->context_id);
        } else {
                printk(KERN_WARNING
                        "Failed to create tx ctx in firmware %d\n", err);
                err = -EIO;
        }

        pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
        pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

        return err;
}

static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = adapter->tx_context_id;
        cmd.req.arg2 = NX_DESTROY_CTX_RESET;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;

        if (netxen_issue_cmd(adapter, &cmd)) {
                printk(KERN_WARNING
                        "%s: Failed to destroy tx ctx in firmware\n",
                        netxen_nic_driver_name);
        }
}

int
nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
{
        u32 rcode;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = reg;
        cmd.req.arg2 = 0;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
        cmd.rsp.arg1 = 1;
        rcode = netxen_issue_cmd(adapter, &cmd);
        if (rcode != NX_RCODE_SUCCESS)
                return -EIO;

        if (val == NULL)
                return -EIO;

        *val = cmd.rsp.arg1;
        return 0;
}

int
nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
{
        u32 rcode;
        struct netxen_cmd_args cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.req.arg1 = reg;
        cmd.req.arg2 = val;
        cmd.req.arg3 = 0;
        cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
        rcode = netxen_issue_cmd(adapter, &cmd);
        if (rcode != NX_RCODE_SUCCESS)
                return -EIO;

        return 0;
}
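
/*
 * Per-function CRB register maps used by the legacy (P2) context scheme:
 * context address/signature registers plus the receive producer, status
 * consumer and interrupt-mask registers for each ring instance.
 */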
static u64 ctx_addr_sig_regs[][3] = {
        {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
        {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
        {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
        {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))

static struct netxen_recv_crb recv_crb_registers[] = {
        /* Instance 0 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x100),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x110),
                        /* LRO */
                        NETXEN_NIC_REG(0x120)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x138),
                        NETXEN_NIC_REG_2(0x000),
                        NETXEN_NIC_REG_2(0x004),
                        NETXEN_NIC_REG_2(0x008),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_0,
                        NETXEN_NIC_REG_2(0x044),
                        NETXEN_NIC_REG_2(0x048),
                        NETXEN_NIC_REG_2(0x04c),
                },
        },
        /* Instance 1 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x144),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x154),
                        /* LRO */
                        NETXEN_NIC_REG(0x164)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x17c),
                        NETXEN_NIC_REG_2(0x020),
                        NETXEN_NIC_REG_2(0x024),
                        NETXEN_NIC_REG_2(0x028),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_1,
                        NETXEN_NIC_REG_2(0x064),
                        NETXEN_NIC_REG_2(0x068),
                        NETXEN_NIC_REG_2(0x06c),
                },
        },
        /* Instance 2 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x1d8),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x1f8),
                        /* LRO */
                        NETXEN_NIC_REG(0x208)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x220),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_2,
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
        },
        /* Instance 3 */
        {
                /* crb_rcv_producer: */
                {
                        NETXEN_NIC_REG(0x22c),
                        /* Jumbo frames */
                        NETXEN_NIC_REG(0x23c),
                        /* LRO */
                        NETXEN_NIC_REG(0x24c)
                },
                /* crb_sts_consumer: */
                {
                        NETXEN_NIC_REG(0x264),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
                /* sw_int_mask */
                {
                        CRB_SW_INT_MASK_3,
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                        NETXEN_NIC_REG_2(0x03c),
                },
        },
};
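
/*
 * Legacy (P2) context setup: fill the shared netxen_ring_ctx structure
 * with the ring addresses and sizes, then write its DMA address and a
 * context signature into the per-port CRB registers.
 */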
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;
        int ring;
        int port = adapter->portnum;
        struct netxen_ring_ctx *hwctx;
        u32 signature;

        tx_ring = adapter->tx_ring;
        recv_ctx = &adapter->recv_ctx;
        hwctx = recv_ctx->hwctx;

        hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
        hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];

                hwctx->rcv_rings[ring].addr =
                        cpu_to_le64(rds_ring->phys_addr);
                hwctx->rcv_rings[ring].size =
                        cpu_to_le32(rds_ring->num_desc);
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                if (ring == 0) {
                        hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
                        hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
                }
                hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
                hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
                hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
        }
        hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

        signature = (adapter->max_sds_rings > 1) ?
                NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

        NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
                        lower32(recv_ctx->phys_addr));
        NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
                        upper32(recv_ctx->phys_addr));
        NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
                        signature | port);
        return 0;
}
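
/*
 * Allocate the DMA-coherent hardware context, the tx (cmd) descriptor
 * ring and all rds/sds descriptor rings, then bring up the firmware
 * context: CDRP commands on newer (non-P2) parts, netxen_init_old_ctx()
 * on P2.
 */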
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
        void *addr;
        int err = 0;
        int ring;
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;

        struct pci_dev *pdev = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
        int port = adapter->portnum;

        recv_ctx = &adapter->recv_ctx;
        tx_ring = adapter->tx_ring;

        addr = pci_alloc_consistent(pdev,
                        sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
                        &recv_ctx->phys_addr);
        if (addr == NULL) {
                dev_err(&pdev->dev, "failed to allocate hw context\n");
                return -ENOMEM;
        }

        memset(addr, 0, sizeof(struct netxen_ring_ctx));
        recv_ctx->hwctx = addr;
        recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
        recv_ctx->hwctx->cmd_consumer_offset =
                cpu_to_le64(recv_ctx->phys_addr +
                        sizeof(struct netxen_ring_ctx));
        tx_ring->hw_consumer =
                (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

        /* cmd desc ring */
        addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
                        &tx_ring->phys_addr);
        if (addr == NULL) {
                dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
                                netdev->name);
                err = -ENOMEM;
                goto err_out_free;
        }

        tx_ring->desc_head = addr;

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];
                addr = pci_alloc_consistent(adapter->pdev,
                                RCV_DESC_RINGSIZE(rds_ring),
                                &rds_ring->phys_addr);
                if (addr == NULL) {
                        dev_err(&pdev->dev,
                                "%s: failed to allocate rds ring [%d]\n",
                                netdev->name, ring);
                        err = -ENOMEM;
                        goto err_out_free;
                }
                rds_ring->desc_head = addr;

                if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
                        rds_ring->crb_rcv_producer =
                                netxen_get_ioaddr(adapter,
                        recv_crb_registers[port].crb_rcv_producer[ring]);
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                addr = pci_alloc_consistent(adapter->pdev,
                                STATUS_DESC_RINGSIZE(sds_ring),
                                &sds_ring->phys_addr);
                if (addr == NULL) {
                        dev_err(&pdev->dev,
                                "%s: failed to allocate sds ring [%d]\n",
                                netdev->name, ring);
                        err = -ENOMEM;
                        goto err_out_free;
                }
                sds_ring->desc_head = addr;

                if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                        sds_ring->crb_sts_consumer =
                                netxen_get_ioaddr(adapter,
                        recv_crb_registers[port].crb_sts_consumer[ring]);

                        sds_ring->crb_intr_mask =
                                netxen_get_ioaddr(adapter,
                        recv_crb_registers[port].sw_int_mask[ring]);
                }
        }

        if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
                        goto done;
                err = nx_fw_cmd_create_rx_ctx(adapter);
                if (err)
                        goto err_out_free;
                err = nx_fw_cmd_create_tx_ctx(adapter);
                if (err)
                        goto err_out_free;
        } else {
                err = netxen_init_old_ctx(adapter);
                if (err)
                        goto err_out_free;
        }

done:
        return 0;

err_out_free:
        netxen_free_hw_resources(adapter);
        return err;
}
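
/*
 * Tear down the firmware context and release every DMA ring allocated by
 * netxen_alloc_hw_resources(). Each pointer is checked before freeing, so
 * this is safe on a partially initialized adapter.
 */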
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct nx_host_tx_ring *tx_ring;
        int ring;

        int port = adapter->portnum;

        if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
                        goto done;

                nx_fw_cmd_destroy_rx_ctx(adapter);
                nx_fw_cmd_destroy_tx_ctx(adapter);
        } else {
                netxen_api_lock(adapter);
                NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
                                NETXEN_CTX_D3_RESET | port);
                netxen_api_unlock(adapter);
        }

        /* Allow dma queues to drain after context reset */
        msleep(20);

done:
        recv_ctx = &adapter->recv_ctx;

        if (recv_ctx->hwctx != NULL) {
                pci_free_consistent(adapter->pdev,
                                sizeof(struct netxen_ring_ctx) +
                                sizeof(uint32_t),
                                recv_ctx->hwctx,
                                recv_ctx->phys_addr);
                recv_ctx->hwctx = NULL;
        }

        tx_ring = adapter->tx_ring;
        if (tx_ring->desc_head != NULL) {
                pci_free_consistent(adapter->pdev,
                                TX_DESC_RINGSIZE(tx_ring),
                                tx_ring->desc_head, tx_ring->phys_addr);
                tx_ring->desc_head = NULL;
        }

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];

                if (rds_ring->desc_head != NULL) {
                        pci_free_consistent(adapter->pdev,
                                        RCV_DESC_RINGSIZE(rds_ring),
                                        rds_ring->desc_head,
                                        rds_ring->phys_addr);
                        rds_ring->desc_head = NULL;
                }
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];

                if (sds_ring->desc_head != NULL) {
                        pci_free_consistent(adapter->pdev,
                                        STATUS_DESC_RINGSIZE(sds_ring),
                                        sds_ring->desc_head,
                                        sds_ring->phys_addr);
                        sds_ring->desc_head = NULL;
                }
        }
}