be_cmds.c

/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"
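
/* Ring the MCC queue doorbell: the low bits carry the ring id, and the
 * num_posted field (here always 1) tells the controller how many new WRBs
 * were placed on the queue.
 */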
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
		struct be_mcc_cq_entry *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		printk(KERN_WARNING DRV_NAME
			" error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_cq_entry *compl;
	int num = 0;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else {
			be_mcc_compl_process(adapter, compl);
			atomic_dec(&adapter->mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}
	if (num)
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
	spin_unlock_bh(&adapter->mcc_cq_lock);
}

/* Wait till no more pending mcc requests are present */
static void be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	50000 /* 5s timeout */
	int i;

	for (i = 0; i < mcc_timeout; i++) {
		be_process_mcc(adapter);
		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout)
		printk(KERN_WARNING DRV_NAME ": mcc poll timed out\n");
}

/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 200000) {
			printk(KERN_WARNING DRV_NAME
				": mbox_db poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}
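
/* Note on the poll above: cnt accumulates the microseconds slept, so the
 * loop busy-waits in 5us steps at first, backs off to 200us steps once
 * roughly 50us have elapsed, and gives up after about 200ms in total.
 */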

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
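/* Each step posts 30 bits of the mailbox DMA address: first the upper bits
 * (34-63) with the HI flag set, then, after the controller signals ready,
 * the lower bits (4-33) with HI clear. Bits 0-3 are never posted, so the
 * mailbox memory presumably must be at least 16-byte aligned.
 */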
static int be_mbox_db_ring(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_cq_entry *cqe = &mbox->cqe;

	memset(cqe, 0, sizeof(*cqe));

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(cqe)) {
		status = be_mcc_compl_process(adapter, &mbox->cqe);
		be_mcc_compl_use(cqe);
		if (status)
			return status;
	} else {
		printk(KERN_WARNING DRV_NAME ": invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

static int be_POST_stage_poll(struct be_adapter *adapter, u16 poll_stage)
{
	u16 stage, cnt, error;

	for (cnt = 0; cnt < 5000; cnt++) {
		error = be_POST_stage_get(adapter, &stage);
		if (error)
			return -1;

		if (stage == poll_stage)
			break;
		udelay(1000);
	}
	if (stage != poll_stage)
		return -1;
	return 0;
}
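
/* POST handshake, as driven below: if the firmware already reports
 * ARMFW_RDY there is nothing to do. If it is awaiting host-ready, the
 * driver requests a reset, polls until AWAITING_HOST_RDY is reached again,
 * then signals HOST_RDY and polls until the firmware reaches ARMFW_RDY.
 */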
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage, error;

	error = be_POST_stage_get(adapter, &stage);
	if (error)
		goto err;

	if (stage == POST_STAGE_ARMFW_RDY)
		return 0;

	if (stage != POST_STAGE_AWAITING_HOST_RDY)
		goto err;

	/* On awaiting host rdy, reset and again poll on awaiting host rdy */
	iowrite32(POST_STAGE_BE_RESET, adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(adapter, POST_STAGE_AWAITING_HOST_RDY);
	if (error)
		goto err;

	/* Now kickoff POST and poll on armfw ready */
	iowrite32(POST_STAGE_HOST_RDY, adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(adapter, POST_STAGE_ARMFW_RDY);
	if (error)
		goto err;

	return 0;
err:
	printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
		bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 20);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
		u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
		struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
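
/* For example, an 8KB ring whose DMA address is 0x1000 would be described
 * by two page entries, {lo=0x1000, hi=0} and {lo=0x2000, hi=0}, each split
 * into little-endian low/high 32-bit halves for the firmware.
 */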

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE	651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
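
/* Worked example: usec_delay = 8 gives interrupt_rate = 125000, so
 * multiplier = (651042 - 125000) * 10 / 125000 = 42, which rounds to
 * (42 + 5) / 10 = 4. A delay of 0 disables delay-based coalescing.
 */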

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
	struct be_mcc_wrb *wrb = NULL;

	if (atomic_read(&mccq->used) < mccq->len) {
		wrb = queue_head_node(mccq);
		queue_head_inc(mccq);
		atomic_inc(&mccq->used);
		memset(wrb, 0, sizeof(*wrb));
	}
	return wrb;
}
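
/* wrb_from_mcc() returns NULL when the MCC queue is full; the MCC-based
 * callers below treat that as a fatal condition (BUG_ON) rather than
 * queueing the request for later.
 */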

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			adapter->pci_func);
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
		u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_mac_query *req = embedded_payload(wrb);
	struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->permanent = 0;
	}

	status = be_mbox_db_ring(adapter);
	if (!status)
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mbox_db_ring(adapter);
	spin_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, adapter->pci_func);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
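
/* Example: a 256-entry queue encodes as fls(256) = 9; the largest supported
 * length (32768 entries, fls = 16) is mapped to the special value 0.
 */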

int be_cmd_mccq_create(struct be_adapter *adapter,
		struct be_queue_info *mccq,
		struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, adapter->pci_func);
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
		struct be_queue_info *txq,
		struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt = &req->context;
	int status;
	u32 len_encoded;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	len_encoded = fls(txq->len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			adapter->pci_func);
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Generic destroyer function for all types of queues */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&adapter->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		printk(KERN_WARNING DRV_NAME ": bad Q type in Q destroy cmd\n");
		status = -1;
		goto err;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_db_ring(adapter);
err:
	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Create an rx filtering policy configuration on an i/f */
int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_if_create *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(flags);
	req->enable_flags = cpu_to_le32(flags);
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);
	status = be_mbox_db_ring(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_link_status_query(struct be_adapter *adapter,
		bool *link_up)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_link_status *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);

	*link_up = false;
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
			*link_up = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	status = be_mbox_db_ring(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mbox_db_ring(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mcc(&adapter->mcc_obj.q);
	BUG_ON(!wrb);

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);
	return 0;
}

/*
 * Use MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC		32 /* set mcast promisc if > 32 */
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mcc(&adapter->mcc_obj.q);
	BUG_ON(!wrb);

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (mc_list && mc_count <= BE_MAX_MC) {
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	be_mcc_notify_wait(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	return 0;
}

int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mbox_db_ring(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
	struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
	int status;

	spin_lock(&adapter->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_db_ring(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}