/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"

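/* Ring the MCC doorbell: the low bits carry the MCC queue id and the
 * num_posted field tells hardware how many new WRBs were just queued. */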
static void be_mcc_notify(struct be_ctrl_info *ctrl)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
}

/* To check if the valid bit is set, check the entire word as we don't know
 * the endianness of the data (an old entry is in host endian while a new
 * entry is in little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
	compl->flags = 0;
}

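/* Returns 0 on success; logs a warning and returns -1 when the completion
 * carries a non-success status. */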
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
	struct be_mcc_cq_entry *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		printk(KERN_WARNING DRV_NAME
			" error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_ctrl_info *ctrl,
		struct be_async_event_link_state *evt)
{
	ctrl->async_cb(ctrl->adapter_ctxt,
		evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
{
	struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
	struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

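/* Drain all new completions on the MCC CQ: async link events are handed to
 * the driver's callback, command completions decrement the count of pending
 * WRBs, and the CQ is finally re-armed with the number of entries consumed. */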
void be_process_mcc(struct be_ctrl_info *ctrl)
{
	struct be_mcc_cq_entry *compl;
	int num = 0;

	spin_lock_bh(&ctrl->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(ctrl))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link evt */
			be_async_link_state_process(ctrl,
				(struct be_async_event_link_state *) compl);
		} else {
			be_mcc_compl_process(ctrl, compl);
			atomic_dec(&ctrl->mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}
	if (num)
		be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
	spin_unlock_bh(&ctrl->mcc_cq_lock);
}

/* Wait until no pending mcc requests remain */
static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
{
#define mcc_timeout	50000 /* 5s timeout */
	int i;

	for (i = 0; i < mcc_timeout; i++) {
		be_process_mcc(ctrl);
		if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout)
		printk(KERN_WARNING DRV_NAME ": mcc poll timed out\n");
}

/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
{
	be_mcc_notify(ctrl);
	be_mcc_wait_compl(ctrl);
}

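/* Poll the mailbox doorbell until hardware sets the ready bit: busy-wait in
 * 5us steps at first, then back off to 200us steps; give up after roughly
 * 200ms (cnt accumulates the microseconds waited so far). */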
static int be_mbox_db_ready_wait(void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 200000) {
			printk(KERN_WARNING DRV_NAME
				": mbox_db poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps and poll on the
 * mbox doorbell until a command completion (or a timeout) occurs.
 */
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_cq_entry *cqe = &mbox->cqe;

	memset(cqe, 0, sizeof(*cqe));

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(cqe)) {
		status = be_mcc_compl_process(ctrl, cqe);
		be_mcc_compl_use(cqe);
		if (status)
			return status;
	} else {
		printk(KERN_WARNING DRV_NAME ": invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

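/* Read the current POST stage from the EP semaphore register; returns -1 if
 * the error bit is set in the semaphore word. */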
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
{
	u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

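/* Poll, for up to ~5 seconds (5000 x 1ms), until POST reaches poll_stage. */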
static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage)
{
	u16 stage, cnt, error;

	for (cnt = 0; cnt < 5000; cnt++) {
		error = be_POST_stage_get(ctrl, &stage);
		if (error)
			return -1;

		if (stage == poll_stage)
			break;
		udelay(1000);
	}
	if (stage != poll_stage)
		return -1;
	return 0;
}

int be_cmd_POST(struct be_ctrl_info *ctrl)
{
	u16 stage, error;

	error = be_POST_stage_get(ctrl, &stage);
	if (error)
		goto err;

	if (stage == POST_STAGE_ARMFW_RDY)
		return 0;

	if (stage != POST_STAGE_AWAITING_HOST_RDY)
		goto err;

	/* When in the awaiting-host-ready stage, reset and poll for that
	 * stage again */
	iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY);
	if (error)
		goto err;

	/* Now kickoff POST and poll on armfw ready */
	iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY);
	if (error)
		goto err;

	return 0;
err:
	printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 20);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

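/* A worked example for eq_delay_to_mult() below (illustrative):
 * usec_delay = 100 gives interrupt_rate = 10000/s, so
 * multiplier = ((651042 - 10000) * 10 / 10000 + 5) / 10 = 64. */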
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE	651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
	struct be_mcc_wrb *wrb = NULL;

	if (atomic_read(&mccq->used) < mccq->len) {
		wrb = queue_head_node(mccq);
		queue_head_inc(mccq);
		atomic_inc(&mccq->used);
		memset(wrb, 0, sizeof(*wrb));
	}
	return wrb;
}

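/* Note on the mailbox commands below: the completion response is written by
 * hardware into the same embedded payload that carried the request, so the
 * req and resp pointers intentionally alias one buffer. The q parameters
 * also imply the bring-up order: EQs first, then CQs bound to an EQ, then
 * MCC/TX rings bound to a CQ. */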
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mac_query *req = embedded_payload(wrb);
	struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->permanent = 0;
	}

	status = be_mbox_db_ring(ctrl);
	if (!status)
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mbox_db_ring(ctrl);
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

int be_cmd_cq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

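/* Ring lengths are programmed as log2(len) + 1, e.g. fls(256) == 9; a
 * length whose fls is 16 (i.e. 32K entries) wraps to the encoding 0. */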
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

int be_cmd_txq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

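/* The rx fragment size is programmed as a log2 below (fls(frag_size) - 1,
 * e.g. 2048 -> 11), so frag_size is presumably expected to be a power of 2. */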
int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

/* Generic destroyer function for all types of queues */
int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		printk(KERN_WARNING DRV_NAME ": bad Q type in Q destroy cmd\n");
		status = -1;
		goto err;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_db_ring(ctrl);
err:
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

/* Create an rx filtering policy configuration on an i/f */
int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_create *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(flags);
	req->enable_flags = cpu_to_le32(flags);
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);
	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);

	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * the WRB but points to a separate DMA memory block via a scatter-gather
 * entry.
 */
int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
			bool *link_up)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_link_status *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	*link_up = false;
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
			*link_up = true;
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/* Set the delay interval of the given EQ to the specified value */
int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;

	spin_lock_bh(&ctrl->mcc_lock);

	wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
	BUG_ON(!wrb);

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	be_mcc_notify_wait(ctrl);

	spin_unlock_bh(&ctrl->mcc_lock);
	return 0;
}

/*
 * Use MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
			struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC	32 /* set mcast promisc if > 32 */
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req;

	spin_lock_bh(&ctrl->mcc_lock);

	wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
	BUG_ON(!wrb);

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (mc_list && mc_count <= BE_MAX_MC) {
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	be_mcc_notify_wait(ctrl);

	spin_unlock_bh(&ctrl->mcc_lock);

	return 0;
}

int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}