bnx2fc_hwi.c

/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */
#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe);
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
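/*
 * Every KWQE sender in this file follows the same pattern: build the
 * request(s) on the stack, point kwqe_arr[] at them, and hand the array
 * to cnic->submit_kwqes(). An illustrative sketch of that pattern (not
 * part of the driver; field values are placeholders):
 *
 *	struct fcoe_kwqe_stat req;
 *	struct kwqe *arr[1];
 *	int rc = 0;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
 *	req.hdr.flags = FCOE_KWQE_LAYER_CODE <<
 *			FCOE_KWQE_HEADER_LAYER_CODE_SHIFT;
 *	arr[0] = (struct kwqe *)&req;
 *	if (hba->cnic && hba->cnic->submit_kwqes)
 *		rc = hba->cnic->submit_kwqes(hba->cnic, arr, 1);
 */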
/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba: adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiate the initial handshake
 * with the f/w.
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	fcoe_init3.perf_config = 1;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
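/*
 * Note: every 64-bit DMA address handed to the firmware above is split
 * into two 32-bit halves. A minimal sketch of the split (illustrative
 * only; 'dma' stands for any dma_addr_t used in this file):
 *
 *	lo = (u32) dma;
 *	hi = (u32) ((u64) dma >> 32);
 *
 * The cast through u64 keeps the shift well defined even when
 * dma_addr_t is only 32 bits wide.
 */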
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}
/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
		(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy processing on linkdown, since
	 * resetting the lport also resets the port_id to 0.
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;

	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
							/* fcf mac */
	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
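/*
 * The 24-bit FC port ids above are stored byte-wise, least significant
 * byte first. Worked example (illustrative): for port_id 0xAABBCC,
 * s_id[0] = 0xCC, s_id[1] = 0xBB, s_id[2] = 0xAA; d_id is laid out the
 * same way.
 */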
/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
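/*
 * The firmware takes MAC addresses as three 2-byte words with the
 * canonical byte order reversed. Worked example (illustrative): for
 * 00:11:22:33:44:55, src_mac_addr_lo holds {0x55, 0x44},
 * src_mac_addr_mid holds {0x33, 0x22} and src_mac_addr_hi holds
 * {0x11, 0x00}. The offload, enable and disable KWQEs all use this
 * layout.
 */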
/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;
}
static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = interface->hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}
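/*
 * Flow note for the L2 path above: the CRC must be computed with
 * fcoe_fc_crc() before fc_frame_init() resets the frame state, and it
 * is stored inverted in fr_crc() before the frame is handed to libfc.
 * The frame is then bounced to bnx2fc_wq so that fc_exch_recv() runs
 * in process context rather than in the CQ processing path.
 */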
static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int task_idx, index;
	int rc = 0;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					      GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			goto ret_err_rqe;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
					hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_err_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_err_rqe;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			goto ret_err_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & ((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
			goto ret_err_rqe;
		}
		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
		if (tgt->dev_type != TYPE_TAPE)
			goto skip_rec;
		switch (err_warn) {
		case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
		case FCOE_ERROR_CODE_DATA_OOO_RO:
		case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
		case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
		case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
		case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
				   xid);
			memcpy(&io_req->err_entry, err_entry,
			       sizeof(struct fcoe_err_report_entry));
			if (!test_bit(BNX2FC_FLAG_SRR_SENT,
				      &io_req->req_flags)) {
				spin_unlock_bh(&tgt->tgt_lock);
				rc = bnx2fc_send_rec(io_req);
				spin_lock_bh(&tgt->tgt_lock);

				if (rc)
					goto skip_rec;
			} else
				printk(KERN_ERR PFX "SRR in progress\n");
			goto ret_err_rqe;
		default:
			break;
		}

skip_rec:
		set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
		/*
		 * Cancel the timeout_work, as we received IO
		 * completion with FW error.
		 */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		rc = bnx2fc_initiate_abts(io_req);
		if (rc != SUCCESS) {
			printk(KERN_ERR PFX "err_warn: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				io_req->xid);
			bnx2fc_initiate_cleanup(io_req);
		}
ret_err_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
			goto ret_warn_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & ((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}
		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[task_idx];
		task = &(task_page[index]);
		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_warn_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_warn_rqe;
		}

		memcpy(&io_req->err_entry, err_entry,
		       sizeof(struct fcoe_err_report_entry));

		if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
			/* REC_TOV is not a warning code */
			BUG_ON(1);
		else
			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}
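/*
 * Both the error and warning paths above rebuild a 64-bit bitmap from
 * the two 32-bit halves reported by the firmware and scan it for the
 * lowest set bit. The open-coded loop is equivalent to the sketch
 * below (illustrative; assumes the __ffs64() helper from
 * <linux/bitops.h> may be used here):
 *
 *	u64 map = ((u64)bitmap_hi << 32) | bitmap_lo;
 *	if (map)
 *		err_warn = __ffs64(map);
 */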
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ERR PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}

		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
			bnx2fc_process_els_compl(io_req, task, num_rq);
		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state = %d\n",
				rx_state);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_SEQ_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
			      io_req->xid);
		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
	u32 msg;

	wmb();
	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
			FCOE_CQE_TOGGLE_BIT_SHIFT);
	msg = *((u32 *)rx_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();
}
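/*
 * Arming sketch: the CQ doorbell is a packed structure flushed to the
 * chip as one 32-bit MMIO write, so the hardware sees the consumer
 * index and the toggle bit atomically. The wmb() above makes sure all
 * prior CQE processing is globally visible before the index update
 * reaches the hardware.
 */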
struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u32 num_free_sqes = 0;
	u32 num_cqes = 0;
	u16 wqe;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit <<
	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

		/* new entry on the cq */
		if (wqe & FCOE_CQE_CQE_TYPE) {
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			/* Pending work request completion */
			struct bnx2fc_work *work = NULL;
			struct bnx2fc_percpu_s *fps = NULL;
			unsigned int cpu = wqe % num_possible_cpus();

			fps = &per_cpu(bnx2fc_percpu, cpu);
			spin_lock_bh(&fps->fp_work_lock);
			if (unlikely(!fps->iothread))
				goto unlock;

			work = bnx2fc_alloc_work(tgt, wqe);
			if (work)
				list_add_tail(&work->list,
					      &fps->work_list);
unlock:
			spin_unlock_bh(&fps->fp_work_lock);

			/* Pending work request completion */
			if (fps->iothread && work)
				wake_up_process(fps->iothread);
			else
				bnx2fc_process_cq_compl(tgt, wqe);
			num_free_sqes++;
		}
		cqe++;
		tgt->cq_cons_idx++;
		num_cqes++;

		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
			tgt->cq_cons_idx = 0;
			cqe = cq;
			tgt->cq_curr_toggle_bit =
				1 - tgt->cq_curr_toggle_bit;
		}
	}
	if (num_cqes) {
		/* Arm CQ only if doorbell is mapped */
		if (tgt->ctx_base)
			bnx2fc_arm_cq(tgt);
		atomic_add(num_free_sqes, &tgt->free_sqes);
	}
	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}
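/*
 * Toggle-bit protocol, worked through: the firmware flips the toggle
 * bit it writes into CQEs on every wrap of the ring, and the loop
 * above flips tgt->cq_curr_toggle_bit whenever cq_cons_idx wraps. A
 * CQE is "new" only while its toggle bit matches the driver's current
 * value, so successive passes over the ring alternate between the two
 * toggle values and stale entries from the previous pass are never
 * re-consumed (illustrative; BNX2FC_CQ_WQES_MAX is the real ring size).
 */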
/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba: adapter structure pointer
 * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}
/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	u32 conn_id;
	u32 context_id;
	int rc;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
		goto ofld_cmpl_err;
	} else {
		/* now enable the session */
		rc = bnx2fc_send_session_enable_req(port, tgt);
		if (rc) {
			printk(KERN_ERR PFX "enable session failed\n");
			goto ofld_cmpl_err;
		}
	}
	return;
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}
/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection enable kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct bnx2fc_interface *interface;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ERR PFX "context id mis-match\n");
		return;
	}
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (ofld_kcqe->completion_status)
		goto enbl_cmpl_err;
	else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;
enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}
static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
		printk(KERN_ERR PFX "init failure due to compl status err\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
		break;

	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}
/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context: adapter structure pointer
 * @kcq: kcqe pointer
 * @num_cqe: Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ERR PFX "unknown opcode 0x%x\n",
				kcqe->op_code);
		}
	}
}
void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}
void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	u32 msg;

	wmb();
	sq_db->prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	msg = *((u32 *)sq_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();
}
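/*
 * Typical usage of the two helpers above (illustrative, simplified
 * from the I/O submission paths elsewhere in bnx2fc): queue a task on
 * the SQ and ring the doorbell while holding the tgt lock:
 *
 *	spin_lock_bh(&tgt->tgt_lock);
 *	bnx2fc_add_2_sq(tgt, io_req->xid);
 *	bnx2fc_ring_doorbell(tgt);
 *	spin_unlock_bh(&tgt->tgt_lock);
 */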
int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}
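/*
 * Mapping sketch: each offloaded connection owns a doorbell page
 * inside the doorbell BAR, so the register offset is just the page
 * size times the (masked) context id plus the DPM trigger offset.
 * Worked example with made-up numbers: assuming, purely for
 * illustration, a 4 KiB doorbell page and context_id 0x12, the mapping
 * would start at reg_base + 0x12 * 4096 + DPM_TRIGER_TYPE. Only 4
 * bytes are mapped because the doorbell is a single 32-bit register.
 */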
char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}
void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;

	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
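/*
 * The RQ producer value written to the connection doorbell is
 * effectively a 15-bit index with a toggle above it; note that
 * ofld_req1.rq_prod is seeded with 0x8000 at offload time. When the
 * low 15 bits reach BNX2FC_RQ_WQES_MAX, adding
 * (0x8000 - BNX2FC_RQ_WQES_MAX) wraps them back to 0 while flipping
 * the high bit, which is how the wrap is signalled to the chip.
 */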
void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
				  struct fcoe_task_ctx_entry *task,
				  struct bnx2fc_cmd *orig_io_req,
				  u32 offset)
{
	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
	struct fcoe_task_ctx_entry *orig_task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
	u8 orig_task_type;
	u16 orig_xid = orig_io_req->xid;
	u32 context_id = tgt->context_id;
	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
	u32 orig_offset = offset;
	int bd_count;
	int orig_task_idx, index;
	int i;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		orig_task_type = FCOE_TASK_TYPE_WRITE;
	else
		orig_task_type = FCOE_TASK_TYPE_READ;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;

	bd_count = orig_io_req->bd_tbl->bd_valid;

	/* obtain the appropriate bd entry from relative offset */
	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}
	phys_addr += (i * sizeof(struct fcoe_bd_ctx));

	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)phys_addr;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)phys_addr >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
				bd_count;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
				offset; /* adjusted offset */
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
	} else {
		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
		index = orig_xid % BNX2FC_TASKS_PER_PAGE;

		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[orig_task_idx];
		orig_task = &(task_page[index]);

		/* Multiple SGEs were used for this IO */
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
		sgl->mul_sgl.cur_sge_idx = i;

		memset(&task->rxwr_only.rx_seq_ctx, 0,
		       sizeof(struct fcoe_rx_seq_ctx));
		task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
		task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
	}
}
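
/**
 * bnx2fc_init_cleanup_task - initialize an exchange cleanup task context
 *
 * @io_req:	cleanup request
 * @task:	task context entry to initialize
 * @orig_xid:	exchange id of the task being cleaned up
 */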
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Read Tx Write */
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}
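
/**
 * bnx2fc_init_mp_task - initialize a middle path (ELS/TM) task context
 *
 * @io_req:	middle path request
 * @task:	task context entry to initialize
 *
 * Sets up the request/response SGLs from the mp_req buffers and copies
 * the FC header, byte-swapped to big endian, into the tx frame area.
 */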
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
	}

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	/* rx flags */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		sgl->mul_sgl.sgl_size = 1;
	}
}
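
/**
 * bnx2fc_init_task - initialize a SCSI read/write task context
 *
 * @io_req:	SCSI command request
 * @task:	task context entry to initialize
 *
 * For disk I/O with one (or, on the read side, two) BDs, the SGE is
 * cached directly in the task context; otherwise the task points at
 * the DMA-mapped BD table.
 */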
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fcoe_cached_sge_ctx *cached_sge;
	struct fcoe_ext_mul_sges_ctx *sgl;
	int dev_type = tgt->dev_type;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	bd_count = bd_tbl->bd_valid;
	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
					cached_sge->cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
					cached_sge->cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
					cached_sge->cur_buf_rem =
					fcoe_bd_tbl->buf_len;

			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
					bd_tbl->bd_valid;
		}
	}

	/* Tx Write Rx Read */
	/* Init state to NORMAL */
	task->txwr_rxrd.const_ctx.init_flags |= task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (dev_type == TYPE_TAPE) {
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
		io_req->rec_retry = 0;
	} else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Set initial seq counter */
	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)
		    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	/* rx flags */
	/* Set state to "waiting for the first packet" */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	task->rxwr_txrd.var_ctx.rx_id = 0xffff;

	/* Rx Only */
	if (task_type != FCOE_TASK_TYPE_READ)
		return;

	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	bd_count = bd_tbl->bd_valid;

	if (dev_type == TYPE_DISK) {
		if (bd_count == 1) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else if (bd_count == 2) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;

			fcoe_bd_tbl++;
			cached_sge->second_buf_addr.lo =
						fcoe_bd_tbl->buf_addr_lo;
			cached_sge->second_buf_addr.hi =
						fcoe_bd_tbl->buf_addr_hi;
			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
			sgl->mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			sgl->mul_sgl.sgl_size = bd_count;
		}
	} else {
		sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
	}
}

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * allocate memory for task context, and associated BD table to be used
 * by firmware
 *
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				 GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -ENOMEM;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -ENOMEM;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -ENOMEM;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {
			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
					  hba->task_ctx[i],
					  hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}
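
/**
 * bnx2fc_free_task_ctx - free task context pages and the BD table
 *
 * @hba:	pointer to adapter structure
 */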
void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->task_ctx_bd_tbl,
				  hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						  hba->task_ctx[i],
						  hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}
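
/**
 * bnx2fc_free_hash_table - free the session hash table segments and PBL
 *
 * @hba:	pointer to adapter structure
 *
 * The segment DMA addresses are read back from the PBL (low 32 bits,
 * then high 32 bits, per entry).
 */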
static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	u32 *pbl;

	if (hba->hash_tbl_segments) {
		pbl = hba->hash_tbl_pbl;
		if (pbl) {
			segment_count = hba->hash_tbl_segment_count;
			for (i = 0; i < segment_count; ++i) {
				dma_addr_t dma_address;

				dma_address = le32_to_cpu(*pbl);
				++pbl;
				dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
				++pbl;
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_address);
			}
		}
		kfree(hba->hash_tbl_segments);
		hba->hash_tbl_segments = NULL;
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->hash_tbl_pbl,
				  hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}
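
/**
 * bnx2fc_allocate_hash_table - allocate the session hash table
 *
 * @hba:	pointer to adapter structure
 *
 * The hash table is allocated in BNX2FC_HASH_TBL_CHUNK_SIZE DMA
 * segments, with a page-sized PBL holding each segment's DMA address
 * as a low/high 32-bit pair.
 */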
static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		goto cleanup_ht;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			goto cleanup_dma;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		goto cleanup_dma;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	/* Fill the PBL with the segment addresses, low dword first */
	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	kfree(dma_segment_array);
	return 0;

cleanup_dma:
	for (i = 0; i < segment_count; ++i) {
		if (hba->hash_tbl_segments[i])
			dma_free_coherent(&hba->pcidev->dev,
					  BNX2FC_HASH_TBL_CHUNK_SIZE,
					  hba->hash_tbl_segments[i],
					  dma_segment_array[i]);
	}
	kfree(dma_segment_array);

cleanup_ht:
	kfree(hba->hash_tbl_segments);
	hba->hash_tbl_segments = NULL;
	return -ENOMEM;
}

/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba:	Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
		sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (unsigned long) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}
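
/**
 * bnx2fc_free_fw_resc - free the hash tables, dummy and stats buffers
 *
 * @hba:	pointer to adapter structure
 */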
void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}