be_cmds.c

/**
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"

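/* Ring the MCC queue doorbell: the queue id goes in the low bits and one
 * newly posted WRB is reported in the num_posted field. */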
static void be_mcc_notify(struct beiscsi_hba *phba)
{
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        u32 val = 0;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

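/* A completion entry is "new" while its flags word is non-zero; the valid
 * bit is the hardware's ownership marker, and clearing flags hands the
 * entry back to the hardware. */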
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else
                return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}

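/* Convert a completion to host byte order and decode its status field; on
 * failure, log both the completion status and the extended status. */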
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                                struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        be_dws_le_to_cpu(compl, 4);
        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                        CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
                dev_err(&ctrl->pdev->dev,
                        "error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
                return -1;
        }
        return 0;
}

static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                  ASYNC_TRAILER_EVENT_CODE_MASK) ==
                  ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
        struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}

static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
        iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

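/* Track link state from async events. On link-up every session is failed,
 * evidently so the iSCSI layer tears down stale connections and recovers
 * them on the restored link. */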
static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
                struct be_async_event_link_state *evt)
{
        switch (evt->port_link_status) {
        case ASYNC_EVENT_LINK_DOWN:
                SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
                         evt->physical_port);
                phba->state |= BE_ADAPTER_LINK_DOWN;
                break;
        case ASYNC_EVENT_LINK_UP:
                phba->state = BE_ADAPTER_UP;
                SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
                         evt->physical_port);
                iscsi_host_for_each_session(phba->shost,
                                            be2iscsi_fail_session);
                break;
        default:
                SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on "
                         "Physical Port %d\n",
                         evt->port_link_status,
                         evt->physical_port);
        }
}

static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
                              u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

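/* Drain the MCC completion queue: async entries are dispatched to the link
 * state handler, command completions are decoded and the queue's in-use
 * count dropped. Finally the CQ doorbell is rung with the number of
 * entries consumed so the hardware can reuse them. */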
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
        struct be_mcc_compl *compl;
        int num = 0, status = 0;
        struct be_ctrl_info *ctrl = &phba->ctrl;

        spin_lock_bh(&phba->ctrl.mcc_cq_lock);
        while ((compl = be_mcc_compl_get(phba))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        BUG_ON(!is_link_state_evt(compl->flags));

                        /* Interpret compl as an async link evt */
                        beiscsi_async_link_state_process(phba,
                                (struct be_async_event_link_state *) compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        status = be_mcc_compl_process(ctrl, compl);
                        atomic_dec(&phba->ctrl.mcc_obj.q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }
        if (num)
                beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
        spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
        return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
#define mcc_timeout 120000 /* ~12s: 120000 polls of udelay(100) */
        int i, status;

        for (i = 0; i < mcc_timeout; i++) {
                status = beiscsi_process_mcc(phba);
                if (status)
                        return status;

                if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
                return -1;
        }
        return 0;
}

/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
        be_mcc_notify(phba);
        return be_mcc_wait_compl(phba);
}

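/* Poll the mailbox doorbell until the ready bit is set. Polling backs off
 * from 5us busy-waits to 2ms sleeps after the first ~50us; cnt accumulates
 * the approximate wait in microseconds, so the poll gives up after roughly
 * six seconds. */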
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        int cnt = 0, wait = 5;  /* in usecs */
        u32 ready;

        do {
                ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (cnt > 6000000) {
                        dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
                        return -1;
                }

                if (cnt > 50) {
                        wait = long_delay;
                        mdelay(long_delay / 1000);
                } else
                        udelay(wait);
                cnt += wait;
        } while (true);
        return 0;
}

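/* Post a command through the bootstrap mailbox. The 64-bit mailbox DMA
 * address does not fit in one 30-bit doorbell write, so it is delivered in
 * two steps: first the high half (hi bit set), then the low half, waiting
 * for the ready bit after each write before checking the completion. */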
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
        int status;
        u32 val = 0;
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0) {
                SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed 1\n");
                return status;
        }
        val = 0;
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val &= ~MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32) (mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0) {
                SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed 2\n");
                return status;
        }
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status) {
                        SE_DEBUG(DBG_LVL_1, "be_mcc_compl_process failed\n");
                        return status;
                }
        } else {
                dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps and poll the
 * mbox doorbell till a command completion (or a timeout) occurs.
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
        int status;
        u32 val = 0;
        void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;
        struct be_ctrl_info *ctrl = &phba->ctrl;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}

void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                        bool embedded, u8 sge_cnt)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                        u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

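/* Fill a command's page array with the 4K-page physical addresses backing
 * a DMA region, capped at the number of page slots the request provides. */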
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                                      struct be_dma_mem *mem)
{
        int i, buf_pages;
        u64 dma = (u64) mem->dma;

        buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}

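/* Map a desired EQ delay (in microseconds) onto the hardware delay
 * multiplier: the target interrupt rate is 1000000/usec_delay, and the
 * multiplier is (MAX_INTR_RATE - rate)/rate, rounded to the nearest
 * integer and clamped to the field maximum of 1023. */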
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;

                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        multiplier = (multiplier + round / 2) / round;
                        multiplier = min(multiplier, (u32) 1023);
                }
        }
        return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        struct be_mcc_wrb *wrb;

        BUG_ON(atomic_read(&mccq->used) >= mccq->len);
        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

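/* Create an event queue. Note that req and resp alias the same embedded
 * payload: the firmware overwrites the request in place, so the response
 * is read back from the WRB after be_mbox_notify() completes. */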
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_eq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                      PCI_FUNC(ctrl->pdev->devfn));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                      __ilog2_u32(eq->len / 256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                      eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

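/* Issue the firmware initialization command. The WRB is filled with the
 * fixed byte pattern FF 12 34 FF FF 56 78 FF; the variable name suggests
 * the firmware uses this signature to verify host endianness before any
 * other command is accepted. */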
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        int status;
        u8 *endian_check;

        SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        endian_check = (u8 *) wrb;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x12;
        *endian_check++ = 0x34;
        *endian_check++ = 0xFF;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x56;
        *endian_check++ = 0x78;
        *endian_check++ = 0xFF;
        be_dws_cpu_to_le(wrb, sizeof(*wrb));

        status = be_mbox_notify(ctrl);
        if (status)
                SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize failed\n");

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_cq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt = &req->context;
        int status;

        SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_CQ_CREATE, sizeof(*req));
        if (!q_mem->va)
                SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                      __ilog2_u32(cq->len / 256));
        AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
                      PCI_FUNC(ctrl->pdev->devfn));
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        } else
                SE_DEBUG(DBG_LVL_1,
                         "In beiscsi_cmd_cq_create, status=0x%08x\n", status);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

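/* Encode a ring length for the hardware as log2(len) + 1 (fls of a
 * power-of-two length); an encoded value of 16 wraps to 0, presumably
 * because the hardware field is four bits wide. */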
static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len);   /* log2(len) + 1 */

        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}

int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
                            struct be_queue_info *mccq,
                            struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        struct be_ctrl_info *ctrl;
        void *ctxt;
        int status;

        spin_lock(&phba->ctrl.mbox_lock);
        ctrl = &phba->ctrl;
        wrb = wrb_from_mbox(&ctrl->mbox_mem);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
                      PCI_FUNC(phba->pcidev->devfn));
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                      be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(phba);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        spin_unlock(&phba->ctrl.mbox_lock);
        return status;
}

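/* Destroy a queue. The queue type selects the subsystem/opcode pair; SGL
 * teardown is a config command that takes no queue id, and an unknown
 * type is a driver bug. */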
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                          int queue_type)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
        u8 subsys = 0, opcode = 0;
        int status;

        SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        case QTYPE_WRBQ:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
                break;
        case QTYPE_DPDUQ:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
                break;
        case QTYPE_SGL:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
                break;
        default:
                spin_unlock(&ctrl->mbox_lock);
                BUG();
                return -1;
        }
        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        if (queue_type != QTYPE_SGL)
                req->id = cpu_to_le16(q->id);

        status = be_mbox_notify(ctrl);

        spin_unlock(&ctrl->mbox_lock);
        return status;
}

int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                                    struct be_queue_info *cq,
                                    struct be_queue_info *dq, int length,
                                    int entry_size)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
        void *ctxt = &req->context;
        int status;

        SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
                      1);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
                      PCI_FUNC(ctrl->pdev->devfn));
        AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
                      be_encoded_q_len(length / sizeof(struct phys_addr)));
        AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
                      ctxt, entry_size);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
                      cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                struct be_defq_create_resp *resp = embedded_payload(wrb);

                dq->id = le16_to_cpu(resp->id);
                dq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
                       struct be_queue_info *wrbq)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_wrbq_create_req *req = embedded_payload(wrb);
        struct be_wrbq_create_resp *resp = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                wrbq->id = le16_to_cpu(resp->cid);
                wrbq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

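/* Post SGL pages to the firmware in chunks, each bounded by the number of
 * page slots in the embedded request. A num_pages value of 0xff looks like
 * a sentinel: the loop runs once and 0xff itself is passed through to the
 * firmware as the page count. */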
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                                struct be_dma_mem *q_mem,
                                u32 page_offset, u32 num_pages)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
        int status;
        unsigned int curr_pages;
        u32 internal_page_offset = 0;
        u32 temp_num_pages = num_pages;

        if (num_pages == 0xff)
                num_pages = 1;

        spin_lock(&ctrl->mbox_lock);
        do {
                memset(wrb, 0, sizeof(*wrb));
                be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
                be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                                   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
                                   sizeof(*req));
                curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
                                                pages);
                req->num_pages = min(num_pages, curr_pages);
                req->page_offset = page_offset;
                be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
                q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
                internal_page_offset += req->num_pages;
                page_offset += req->num_pages;
                num_pages -= req->num_pages;

                if (temp_num_pages == 0xff)
                        req->num_pages = temp_num_pages;

                status = be_mbox_notify(ctrl);
                if (status) {
                        SE_DEBUG(DBG_LVL_1,
                                 "FW CMD to map iscsi frags failed.\n");
                        goto error;
                }
        } while (num_pages > 0);
error:
        spin_unlock(&ctrl->mbox_lock);
        if (status != 0)
                beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
        return status;
}