/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
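
/*
 * Illustrative sketch (not a call site in the driver): for a ring id of,
 * say, 5, the doorbell word built above is
 *
 *	val = (5 & DB_MCCQ_RING_ID_MASK) | (1 << DB_MCCQ_NUM_POSTED_SHIFT);
 *
 * i.e. the MCCQ ring id in the low bits and a num-posted count of 1 in
 * the upper field; wmb() ensures the WRB is visible to the device before
 * the doorbell write that tells it to fetch the entry.
 */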

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
		struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
	    (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_ioctl_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
			 "Error in cmd completion - opcode %d, compl %d, extd %d\n",
			 compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->link_speed = evt->qos_link_speed * 10;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
					(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
					compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}
	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}
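
/*
 * A note on the constant above: 120000 polls with udelay(100) between
 * them gives 120000 * 100us = 12s, which is the "12s timeout" the
 * #define comment refers to.
 */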

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			be_detect_dump_ue(adapter);
			return -1;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
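
/*
 * Worked example (illustrative values, not from the driver): for a
 * 16-byte-aligned mailbox at dma = 0x0000000812345670,
 *
 *	1st write: dma bits 34-63 (0x2) land at doorbell bits 2-31, plus
 *		   the "hi" flag: val = MPU_MAILBOX_DB_HI_MASK | 0x8
 *	2nd write: dma bits 4-33 (0x01234567) land at doorbell bits 2-31:
 *		   val = 0x01234567 << 2 = 0x048d159c
 *
 * so the 64-bit mailbox address reaches the firmware 30 bits per
 * doorbell write, with a ready-poll between the two halves.
 */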

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
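
/*
 * Illustrative example: a queue ring of 10KB starting on a 4K boundary
 * spans three 4K pages, so this helper fills pages[0..2] with DMA
 * addresses 4K apart, each split into the little-endian lo/hi words of
 * struct phys_addr.
 */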

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
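
/*
 * Worked example (illustrative): usec_delay = 96 gives
 *
 *	interrupt_rate = 1000000 / 96 = 10416
 *	multiplier     = (651042 - 10416) * 10 / 10416 = 615
 *	rounded        = (615 + 5) / 10 = 62
 *
 * so a 96us EQ delay is programmed as multiplier 62; the value is
 * clamped to 1023, the largest delay the hardware can encode.
 */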

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
				coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
				no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
				__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
				ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
				ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
				coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
				ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
				__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
				ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
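
/*
 * Illustrative values: fls() returns the position of the highest set
 * bit, so for power-of-two ring lengths
 *
 *	q_len = 256   -> fls() = 9
 *	q_len = 1024  -> fls() = 11
 *	q_len = 32768 -> fls() = 16, encoded as 0 per the check above
 */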

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
				be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
				ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
				ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
				be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
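
/*
 * Note (illustrative): frag_size is programmed as a log2 value; e.g. a
 * 2048-byte fragment gives fls(2048) - 1 = 11, and the device rebuilds
 * the byte size as 1 << 11.
 */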

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
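
/*
 * Typical usage (a sketch, not a call site in this file): tearing down a
 * TX ring at unload time would look like
 *
 *	if (txq->created)
 *		be_cmd_q_destroy(adapter, txq, QTYPE_TXQ);
 *
 * with the QTYPE_* value selecting the subsystem/opcode pair above.
 */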

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_ioctl_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later,
	 * the port setting associated only with the
	 * issuing pci function will take effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (netdev == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	u32 myhash[10];
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
		OPCODE_ETH_RSS_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req));

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
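
/*
 * Design note: a flash write can take longer than an mcc poll loop should
 * hold mcc_lock, so this command posts the WRB, drops the lock and sleeps
 * on adapter->flash_compl. be_mcc_compl_process() completes it when a
 * WRB tagged tag0 = OPCODE_COMMON_WRITE_FLASHROM and tag1 =
 * CMD_SUBSYSTEM_COMMON finishes, which is why wrb->tag1 is set above.
 */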

int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));

	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Fill the send buffer by repeating the 64-bit pattern byte by byte */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* NULL check added for consistency: wrb_from_mccq() returns
		 * NULL when the MCCQ is full, as every other caller in this
		 * file already handles */
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_GET_PHY_DETAILS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS,
			sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_QOS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req));

	req->hdr.domain = domain;
	req->valid_bits = BE_QOS_BITS_NIC;
	req->max_bps_nic = bps;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}