/* bfa_msgq.c */
  1. /*
  2. * Linux network driver for Brocade Converged Network Adapter.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License (GPL) Version 2 as
  6. * published by the Free Software Foundation
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. /*
  14. * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
  15. * All rights reserved
  16. * www.brocade.com
  17. */
  18. /* MSGQ module source file. */
  19. #include "bfi.h"
  20. #include "bfa_msgq.h"
  21. #include "bfa_ioc.h"
  22. #define call_cmdq_ent_cbfn(_cmdq_ent, _status) \
  23. { \
  24. bfa_msgq_cmdcbfn_t cbfn; \
  25. void *cbarg; \
  26. cbfn = (_cmdq_ent)->cbfn; \
  27. cbarg = (_cmdq_ent)->cbarg; \
  28. (_cmdq_ent)->cbfn = NULL; \
  29. (_cmdq_ent)->cbarg = NULL; \
  30. if (cbfn) { \
  31. cbfn(cbarg, (_status)); \
  32. } \
  33. }
/* Forward declarations for cmdq doorbell/copy helpers used by the FSM. */
static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

/* Events driving the command-queue (host -> firmware) state machine. */
enum cmdq_event {
	CMDQ_E_START = 1,	/* IOC enabled (BFA_IOC_E_ENABLED) */
	CMDQ_E_STOP = 2,	/* IOC disabled */
	CMDQ_E_FAIL = 3,	/* IOC failed */
	CMDQ_E_POST = 4,	/* command(s) copied into the queue */
	CMDQ_E_INIT_RESP = 5,	/* firmware acknowledged queue init */
	CMDQ_E_DB_READY = 6,	/* doorbell mailbox write completed */
};

/* Generates the <state>_entry()/<state>() handler declarations. */
bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
			enum cmdq_event);
/* Entry action for "stopped": reset all cmdq bookkeeping and fail any
 * commands still sitting on the pending queue (their callbacks run
 * with BFA_STATUS_FAILED).
 */
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}
  65. static void
  66. cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
  67. {
  68. switch (event) {
  69. case CMDQ_E_START:
  70. bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
  71. break;
  72. case CMDQ_E_STOP:
  73. case CMDQ_E_FAIL:
  74. /* No-op */
  75. break;
  76. case CMDQ_E_POST:
  77. cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
  78. break;
  79. default:
  80. bfa_sm_fault(event);
  81. }
  82. }
/* Entry action for "init_wait": drop the shared msgq init wait-count;
 * bfa_msgq_init() runs once both queues have reached init_wait.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
  88. static void
  89. cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
  90. {
  91. switch (event) {
  92. case CMDQ_E_STOP:
  93. case CMDQ_E_FAIL:
  94. bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
  95. break;
  96. case CMDQ_E_POST:
  97. cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
  98. break;
  99. case CMDQ_E_INIT_RESP:
  100. if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
  101. cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
  102. bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
  103. } else
  104. bfa_fsm_set_state(cmdq, cmdq_sm_ready);
  105. break;
  106. default:
  107. bfa_sm_fault(event);
  108. }
  109. }
/* Entry action for "ready": intentionally empty; wait for events. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
  114. static void
  115. cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
  116. {
  117. switch (event) {
  118. case CMDQ_E_STOP:
  119. case CMDQ_E_FAIL:
  120. bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
  121. break;
  122. case CMDQ_E_POST:
  123. bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
  124. break;
  125. default:
  126. bfa_sm_fault(event);
  127. }
  128. }
/* Entry action for "dbell_wait": ring the producer-index doorbell. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
  134. static void
  135. cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
  136. {
  137. switch (event) {
  138. case CMDQ_E_STOP:
  139. case CMDQ_E_FAIL:
  140. bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
  141. break;
  142. case CMDQ_E_POST:
  143. cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
  144. break;
  145. case CMDQ_E_DB_READY:
  146. if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
  147. cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
  148. bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
  149. } else
  150. bfa_fsm_set_state(cmdq, cmdq_sm_ready);
  151. break;
  152. default:
  153. bfa_sm_fault(event);
  154. }
  155. }
/* Mailbox completion callback: the PI doorbell write went out. */
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}
/* Notify firmware of the new cmdq producer index via a mailbox
 * doorbell message.
 *
 * NOTE(review): a zero return from bfa_nw_ioc_mbox_queue() is treated
 * here as "message delivered immediately, not queued", so the ready
 * callback is invoked synchronously — confirm against the IOC mailbox
 * implementation.
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
  176. static void
  177. __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
  178. {
  179. size_t len = cmd->msg_size;
  180. int num_entries = 0;
  181. size_t to_copy;
  182. u8 *src, *dst;
  183. src = (u8 *)cmd->msg_hdr;
  184. dst = (u8 *)cmdq->addr.kva;
  185. dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
  186. while (len) {
  187. to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
  188. len : BFI_MSGQ_CMD_ENTRY_SIZE;
  189. memcpy(dst, src, to_copy);
  190. len -= to_copy;
  191. src += BFI_MSGQ_CMD_ENTRY_SIZE;
  192. BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
  193. dst = (u8 *)cmdq->addr.kva;
  194. dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
  195. num_entries++;
  196. }
  197. }
/* Firmware advanced the cmdq consumer index: record the new CI, then
 * post as many pending commands as now fit, preserving FIFO order.
 * If anything was posted, fire CMDQ_E_POST so the doorbell is rung.
 */
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd =
		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			/* head doesn't fit; stop to keep FIFO order */
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
/* Mailbox completion for a copy-response chunk: send the next chunk if
 * bytes remain, otherwise the copy sequence is done.
 */
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}
/* Firmware requested a copy-back of a cmdq region (offset/len taken
 * from the request).  Reset the chunk token and start streaming the
 * region back via bfa_msgq_cmdq_copy_rsp().
 */
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}
/* Send one chunk (at most BFI_CMD_COPY_SZ bytes) of the requested cmdq
 * region to firmware and advance the token/offset/remaining cursor.
 * Chaining to the next chunk happens in bfa_msgq_cmdq_copy_next().
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	/* token sequences the chunks for the firmware side */
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
/* One-time cmdq setup: fixed depth, empty pending list, stopped FSM. */
static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}
/* Forward declaration for the rspq doorbell helper used by the FSM. */
static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

/* Events driving the response-queue (firmware -> host) state machine. */
enum rspq_event {
	RSPQ_E_START = 1,	/* IOC enabled (BFA_IOC_E_ENABLED) */
	RSPQ_E_STOP = 2,	/* IOC disabled */
	RSPQ_E_FAIL = 3,	/* IOC failed */
	RSPQ_E_RESP = 4,	/* responses consumed; CI doorbell owed */
	RSPQ_E_INIT_RESP = 5,	/* firmware acknowledged queue init */
	RSPQ_E_DB_READY = 6,	/* doorbell mailbox write completed */
};

/* Generates the <state>_entry()/<state>() handler declarations. */
bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
			enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
			enum rspq_event);
/* Entry action for "stopped": reset rspq indices and flags. */
static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}
  291. static void
  292. rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
  293. {
  294. switch (event) {
  295. case RSPQ_E_START:
  296. bfa_fsm_set_state(rspq, rspq_sm_init_wait);
  297. break;
  298. case RSPQ_E_STOP:
  299. case RSPQ_E_FAIL:
  300. /* No-op */
  301. break;
  302. default:
  303. bfa_sm_fault(event);
  304. }
  305. }
/* Entry action for "init_wait": drop the shared msgq init wait-count;
 * bfa_msgq_init() runs once both queues have reached init_wait.
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
  311. static void
  312. rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
  313. {
  314. switch (event) {
  315. case RSPQ_E_FAIL:
  316. case RSPQ_E_STOP:
  317. bfa_fsm_set_state(rspq, rspq_sm_stopped);
  318. break;
  319. case RSPQ_E_INIT_RESP:
  320. bfa_fsm_set_state(rspq, rspq_sm_ready);
  321. break;
  322. default:
  323. bfa_sm_fault(event);
  324. }
  325. }
/* Entry action for "ready": intentionally empty; wait for responses. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
  330. static void
  331. rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
  332. {
  333. switch (event) {
  334. case RSPQ_E_STOP:
  335. case RSPQ_E_FAIL:
  336. bfa_fsm_set_state(rspq, rspq_sm_stopped);
  337. break;
  338. case RSPQ_E_RESP:
  339. bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
  340. break;
  341. default:
  342. bfa_sm_fault(event);
  343. }
  344. }
/* Entry action for "dbell_wait": ring the consumer-index doorbell —
 * but only while the IOC is up; a disabled IOC takes no mailbox traffic.
 */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
  351. static void
  352. rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
  353. {
  354. switch (event) {
  355. case RSPQ_E_STOP:
  356. case RSPQ_E_FAIL:
  357. bfa_fsm_set_state(rspq, rspq_sm_stopped);
  358. break;
  359. case RSPQ_E_RESP:
  360. rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
  361. break;
  362. case RSPQ_E_DB_READY:
  363. if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
  364. rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
  365. bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
  366. } else
  367. bfa_fsm_set_state(rspq, rspq_sm_ready);
  368. break;
  369. default:
  370. bfa_sm_fault(event);
  371. }
  372. }
/* Mailbox completion callback: the CI doorbell write went out. */
static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;

	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}
/* Notify firmware of the new rspq consumer index via a mailbox
 * doorbell message (mirror of bfa_msgq_cmdq_dbell for the rspq side).
 *
 * NOTE(review): a zero return from bfa_nw_ioc_mbox_queue() is treated
 * as "delivered immediately", invoking the ready callback synchronously
 * — confirm against the IOC mailbox implementation.
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
/* Firmware advanced the rspq producer index: dispatch each response
 * entry to the handler registered for its message class, advancing the
 * consumer index by the entry count of each message, then fire
 * RSPQ_E_RESP so the new CI gets doorbelled back to firmware.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		/* invalid class or unregistered handler: stop consuming;
		 * remaining entries are left on the queue
		 */
		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
/* One-time rspq setup: fixed depth, stopped FSM. */
static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}
/* Firmware acknowledged queue initialization: advance both FSMs. */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
/* Wait-counter callback, run once both queues have entered init_wait:
 * send the INIT request (DMA addresses and depths of both queues) to
 * firmware.  The reply is handled by bfa_msgq_init_rsp().
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	/* no completion callback needed for the init request */
	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
/* Mailbox ISR for the MSGQ message class: dispatch firmware-to-host
 * mailbox messages by message id.
 */
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		/* unknown message id from firmware is treated as fatal */
		BUG_ON(1);
	}
}
/* IOC event notification: start/stop/fail both queues in step with the
 * IOC.  On enable, the init wait-counter is raised once per queue
 * before sending START, so bfa_msgq_init() fires only after both FSMs
 * reach their init_wait entry actions (each calls bfa_wc_down).
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
/* Total DMA memory the msgq module needs: both queues, each rounded up
 * to the DMA alignment boundary (must match bfa_msgq_memclaim()).
 */
u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}
/* Carve the DMA block (sized by bfa_msgq_meminfo()) into the cmdq and
 * rspq regions; @kva/@pa are the kernel-virtual and physical base.
 */
void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}
/* Attach the msgq module to an IOC: initialize both queues, register
 * the mailbox ISR for the MSGQ class and subscribe to IOC events.
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
/* Register the response handler for message class @mc (consumed by
 * bfa_msgq_rspq_pi_update()).
 * NOTE(review): @mc is not range-checked against BFI_MC_MAX here —
 * callers are trusted to pass a valid class.
 */
void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn = cbfn;
	msgq->rspq.rsphdlr[mc].cbarg = cbarg;
}
/* Post a command to the cmdq.  If there is room, copy it in, complete
 * the caller's callback immediately (BFA_STATUS_OK) and kick the
 * doorbell FSM; otherwise park it on pending_q, to be posted when the
 * firmware frees space (see bfa_msgq_cmdq_ci_update()).
 */
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}
/* Copy @buf_len bytes of response data out of the rspq into @buf,
 * starting at the current consumer index.  Only a local copy of the
 * index is advanced — rspq->consumer_index is left untouched, so the
 * entries are read without being consumed.
 */
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		/* the final chunk may be shorter than a full entry */
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		/* recompute src from the (possibly wrapped) index */
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}
  559. }