qla_iocb.c

  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2011 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include <linux/blkdev.h>
  9. #include <linux/delay.h>
  10. #include <scsi/scsi_tcq.h>
  11. static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
  12. static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB for the SCSI command
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
  19. static inline uint16_t
  20. qla2x00_get_cmd_direction(srb_t *sp)
  21. {
  22. uint16_t cflags;
  23. cflags = 0;
  24. /* Set transfer direction */
  25. if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
  26. cflags = CF_WRITE;
  27. sp->fcport->vha->hw->qla_stats.output_bytes +=
  28. scsi_bufflen(sp->cmd);
  29. } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
  30. cflags = CF_READ;
  31. sp->fcport->vha->hw->qla_stats.input_bytes +=
  32. scsi_bufflen(sp->cmd);
  33. }
  34. return (cflags);
  35. }
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
  44. uint16_t
  45. qla2x00_calc_iocbs_32(uint16_t dsds)
  46. {
  47. uint16_t iocbs;
  48. iocbs = 1;
  49. if (dsds > 3) {
  50. iocbs += (dsds - 3) / 7;
  51. if ((dsds - 3) % 7)
  52. iocbs++;
  53. }
  54. return (iocbs);
  55. }
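/*
 * Illustrative arithmetic for the sizing above: a Command Type 2 IOCB
 * carries 3 DSDs and each Continuation Type 0 IOCB carries 7 more, so
 * for example dsds = 10 yields iocbs = 1 + (10 - 3) / 7 = 2, while
 * dsds = 11 needs a third entry because (11 - 3) % 7 != 0.
 */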
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
  64. uint16_t
  65. qla2x00_calc_iocbs_64(uint16_t dsds)
  66. {
  67. uint16_t iocbs;
  68. iocbs = 1;
  69. if (dsds > 2) {
  70. iocbs += (dsds - 2) / 5;
  71. if ((dsds - 2) % 5)
  72. iocbs++;
  73. }
  74. return (iocbs);
  75. }
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
  82. static inline cont_entry_t *
  83. qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
  84. {
  85. cont_entry_t *cont_pkt;
  86. struct req_que *req = vha->req;
  87. /* Adjust ring index. */
  88. req->ring_index++;
  89. if (req->ring_index == req->length) {
  90. req->ring_index = 0;
  91. req->ring_ptr = req->ring;
  92. } else {
  93. req->ring_ptr++;
  94. }
  95. cont_pkt = (cont_entry_t *)req->ring_ptr;
  96. /* Load packet defaults. */
  97. *((uint32_t *)(&cont_pkt->entry_type)) =
  98. __constant_cpu_to_le32(CONTINUE_TYPE);
  99. return (cont_pkt);
  100. }
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
  107. static inline cont_a64_entry_t *
  108. qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
  109. {
  110. cont_a64_entry_t *cont_pkt;
  111. struct req_que *req = vha->req;
  112. /* Adjust ring index. */
  113. req->ring_index++;
  114. if (req->ring_index == req->length) {
  115. req->ring_index = 0;
  116. req->ring_ptr = req->ring;
  117. } else {
  118. req->ring_ptr++;
  119. }
  120. cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
  121. /* Load packet defaults. */
  122. *((uint32_t *)(&cont_pkt->entry_type)) =
  123. __constant_cpu_to_le32(CONTINUE_A64_TYPE);
  124. return (cont_pkt);
  125. }
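/*
 * Note on the two prep helpers above: the request queue is a circular
 * ring of fixed-size entries, so advancing ring_index past the last slot
 * wraps both the index and ring_ptr back to the start.  A continuation
 * IOCB is therefore just the next ring slot, stamped with the
 * CONTINUE_TYPE or CONTINUE_A64_TYPE entry type.
 */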
  126. static inline int
  127. qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
  128. {
  129. uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
  130. /* We only support T10 DIF right now */
  131. if (guard != SHOST_DIX_GUARD_CRC) {
  132. ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
  133. "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
  134. return 0;
  135. }
/* We always use DIF bundling for best performance */
  137. *fw_prot_opts = 0;
  138. /* Translate SCSI opcode to a protection opcode */
  139. switch (scsi_get_prot_op(sp->cmd)) {
  140. case SCSI_PROT_READ_STRIP:
  141. *fw_prot_opts |= PO_MODE_DIF_REMOVE;
  142. break;
  143. case SCSI_PROT_WRITE_INSERT:
  144. *fw_prot_opts |= PO_MODE_DIF_INSERT;
  145. break;
  146. case SCSI_PROT_READ_INSERT:
  147. *fw_prot_opts |= PO_MODE_DIF_INSERT;
  148. break;
  149. case SCSI_PROT_WRITE_STRIP:
  150. *fw_prot_opts |= PO_MODE_DIF_REMOVE;
  151. break;
  152. case SCSI_PROT_READ_PASS:
  153. *fw_prot_opts |= PO_MODE_DIF_PASS;
  154. break;
  155. case SCSI_PROT_WRITE_PASS:
  156. *fw_prot_opts |= PO_MODE_DIF_PASS;
  157. break;
  158. default: /* Normal Request */
  159. *fw_prot_opts |= PO_MODE_DIF_PASS;
  160. break;
  161. }
  162. return scsi_prot_sg_count(sp->cmd);
  163. }
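/*
 * Summary of the translation above: INSERT and STRIP operations (the HBA
 * adds or removes the DIF tuple) map to PO_MODE_DIF_INSERT and
 * PO_MODE_DIF_REMOVE respectively, PASS operations map to
 * PO_MODE_DIF_PASS, and the return value is the number of protection
 * scatter/gather entries, or 0 when the guard type is not T10 CRC.
 */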
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
  172. void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
  173. uint16_t tot_dsds)
  174. {
  175. uint16_t avail_dsds;
  176. uint32_t *cur_dsd;
  177. scsi_qla_host_t *vha;
  178. struct scsi_cmnd *cmd;
  179. struct scatterlist *sg;
  180. int i;
  181. cmd = sp->cmd;
  182. /* Update entry type to indicate Command Type 2 IOCB */
  183. *((uint32_t *)(&cmd_pkt->entry_type)) =
  184. __constant_cpu_to_le32(COMMAND_TYPE);
  185. /* No data transfer */
  186. if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
  187. cmd_pkt->byte_count = __constant_cpu_to_le32(0);
  188. return;
  189. }
  190. vha = sp->fcport->vha;
  191. cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
  192. /* Three DSDs are available in the Command Type 2 IOCB */
  193. avail_dsds = 3;
  194. cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
  195. /* Load data segments */
  196. scsi_for_each_sg(cmd, sg, tot_dsds, i) {
  197. cont_entry_t *cont_pkt;
  198. /* Allocate additional continuation packets? */
  199. if (avail_dsds == 0) {
  200. /*
  201. * Seven DSDs are available in the Continuation
  202. * Type 0 IOCB.
  203. */
  204. cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
  205. cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
  206. avail_dsds = 7;
  207. }
  208. *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
  209. *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
  210. avail_dsds--;
  211. }
  212. }
  213. /**
  214. * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
  215. * capable IOCB types.
  216. *
  217. * @sp: SRB command to process
  218. * @cmd_pkt: Command type 3 IOCB
  219. * @tot_dsds: Total number of segments to transfer
  220. */
  221. void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
  222. uint16_t tot_dsds)
  223. {
  224. uint16_t avail_dsds;
  225. uint32_t *cur_dsd;
  226. scsi_qla_host_t *vha;
  227. struct scsi_cmnd *cmd;
  228. struct scatterlist *sg;
  229. int i;
  230. cmd = sp->cmd;
  231. /* Update entry type to indicate Command Type 3 IOCB */
  232. *((uint32_t *)(&cmd_pkt->entry_type)) =
  233. __constant_cpu_to_le32(COMMAND_A64_TYPE);
  234. /* No data transfer */
  235. if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
  236. cmd_pkt->byte_count = __constant_cpu_to_le32(0);
  237. return;
  238. }
  239. vha = sp->fcport->vha;
  240. cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
  241. /* Two DSDs are available in the Command Type 3 IOCB */
  242. avail_dsds = 2;
  243. cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
  244. /* Load data segments */
  245. scsi_for_each_sg(cmd, sg, tot_dsds, i) {
  246. dma_addr_t sle_dma;
  247. cont_a64_entry_t *cont_pkt;
  248. /* Allocate additional continuation packets? */
  249. if (avail_dsds == 0) {
  250. /*
  251. * Five DSDs are available in the Continuation
  252. * Type 1 IOCB.
  253. */
  254. cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
  255. cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
  256. avail_dsds = 5;
  257. }
  258. sle_dma = sg_dma_address(sg);
  259. *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
  260. *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
  261. *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
  262. avail_dsds--;
  263. }
  264. }
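/*
 * DSD layout note for the two builders above: a 32-bit DSD is an
 * (address, length) pair while a 64-bit DSD is an (address low,
 * address high, length) triple, which is why the Command Type 2 IOCB
 * holds three DSDs but the Command Type 3 IOCB only holds two.
 */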
  265. /**
  266. * qla2x00_start_scsi() - Send a SCSI command to the ISP
  267. * @sp: command to send to the ISP
  268. *
  269. * Returns non-zero if a failure occurred, else zero.
  270. */
  271. int
  272. qla2x00_start_scsi(srb_t *sp)
  273. {
  274. int ret, nseg;
  275. unsigned long flags;
  276. scsi_qla_host_t *vha;
  277. struct scsi_cmnd *cmd;
  278. uint32_t *clr_ptr;
  279. uint32_t index;
  280. uint32_t handle;
  281. cmd_entry_t *cmd_pkt;
  282. uint16_t cnt;
  283. uint16_t req_cnt;
  284. uint16_t tot_dsds;
  285. struct device_reg_2xxx __iomem *reg;
  286. struct qla_hw_data *ha;
  287. struct req_que *req;
  288. struct rsp_que *rsp;
  289. char tag[2];
  290. /* Setup device pointers. */
  291. ret = 0;
  292. vha = sp->fcport->vha;
  293. ha = vha->hw;
  294. reg = &ha->iobase->isp;
  295. cmd = sp->cmd;
  296. req = ha->req_q_map[0];
  297. rsp = ha->rsp_q_map[0];
  298. /* So we know we haven't pci_map'ed anything yet */
  299. tot_dsds = 0;
  300. /* Send marker if required */
  301. if (vha->marker_needed != 0) {
  302. if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
  303. QLA_SUCCESS) {
  304. return (QLA_FUNCTION_FAILED);
  305. }
  306. vha->marker_needed = 0;
  307. }
  308. /* Acquire ring specific lock */
  309. spin_lock_irqsave(&ha->hardware_lock, flags);
  310. /* Check for room in outstanding command list. */
  311. handle = req->current_outstanding_cmd;
  312. for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
  313. handle++;
  314. if (handle == MAX_OUTSTANDING_COMMANDS)
  315. handle = 1;
  316. if (!req->outstanding_cmds[handle])
  317. break;
  318. }
  319. if (index == MAX_OUTSTANDING_COMMANDS)
  320. goto queuing_error;
  321. /* Map the sg table so we have an accurate count of sg entries needed */
  322. if (scsi_sg_count(cmd)) {
  323. nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
  324. scsi_sg_count(cmd), cmd->sc_data_direction);
  325. if (unlikely(!nseg))
  326. goto queuing_error;
  327. } else
  328. nseg = 0;
  329. tot_dsds = nseg;
  330. /* Calculate the number of request entries needed. */
  331. req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
  332. if (req->cnt < (req_cnt + 2)) {
  333. cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
  334. if (req->ring_index < cnt)
  335. req->cnt = cnt - req->ring_index;
  336. else
  337. req->cnt = req->length -
  338. (req->ring_index - cnt);
  339. }
  340. if (req->cnt < (req_cnt + 2))
  341. goto queuing_error;
  342. /* Build command packet */
  343. req->current_outstanding_cmd = handle;
  344. req->outstanding_cmds[handle] = sp;
  345. sp->handle = handle;
  346. sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
  347. req->cnt -= req_cnt;
  348. cmd_pkt = (cmd_entry_t *)req->ring_ptr;
  349. cmd_pkt->handle = handle;
  350. /* Zero out remaining portion of packet. */
  351. clr_ptr = (uint32_t *)cmd_pkt + 2;
  352. memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
  353. cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
  354. /* Set target ID and LUN number*/
  355. SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
  356. cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
  357. /* Update tagged queuing modifier */
  358. if (scsi_populate_tag_msg(cmd, tag)) {
  359. switch (tag[0]) {
  360. case HEAD_OF_QUEUE_TAG:
  361. cmd_pkt->control_flags =
  362. __constant_cpu_to_le16(CF_HEAD_TAG);
  363. break;
  364. case ORDERED_QUEUE_TAG:
  365. cmd_pkt->control_flags =
  366. __constant_cpu_to_le16(CF_ORDERED_TAG);
  367. break;
  368. default:
  369. cmd_pkt->control_flags =
  370. __constant_cpu_to_le16(CF_SIMPLE_TAG);
  371. break;
  372. }
  373. }
  374. /* Load SCSI command packet. */
  375. memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
  376. cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
  377. /* Build IOCB segments */
  378. ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
  379. /* Set total data segment count. */
  380. cmd_pkt->entry_count = (uint8_t)req_cnt;
  381. wmb();
  382. /* Adjust ring index. */
  383. req->ring_index++;
  384. if (req->ring_index == req->length) {
  385. req->ring_index = 0;
  386. req->ring_ptr = req->ring;
  387. } else
  388. req->ring_ptr++;
  389. sp->flags |= SRB_DMA_VALID;
  390. /* Set chip new ring index. */
  391. WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
  392. RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
  393. /* Manage unprocessed RIO/ZIO commands in response queue. */
  394. if (vha->flags.process_response_queue &&
  395. rsp->ring_ptr->signature != RESPONSE_PROCESSED)
  396. qla2x00_process_response_queue(rsp);
  397. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  398. return (QLA_SUCCESS);
  399. queuing_error:
  400. if (tot_dsds)
  401. scsi_dma_unmap(cmd);
  402. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  403. return (QLA_FUNCTION_FAILED);
  404. }
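/*
 * Ring-space accounting used above (and by the 24xx variants below):
 * req->cnt caches the number of free request entries.  When it looks too
 * small, the firmware's out pointer is re-read and free space recomputed;
 * illustratively, with length = 2048, ring_index = 100 and out pointer
 * cnt = 90, free space is 2048 - (100 - 90) = 2038 entries, while
 * ring_index = 80 gives 90 - 80 = 10.  Two entries are always held back
 * as slack (the "req_cnt + 2" check).
 */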
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
  416. static int
  417. __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
  418. struct rsp_que *rsp, uint16_t loop_id,
  419. uint16_t lun, uint8_t type)
  420. {
  421. mrk_entry_t *mrk;
  422. struct mrk_entry_24xx *mrk24;
  423. struct qla_hw_data *ha = vha->hw;
  424. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  425. mrk24 = NULL;
  426. mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
  427. if (mrk == NULL) {
  428. ql_log(ql_log_warn, base_vha, 0x3026,
  429. "Failed to allocate Marker IOCB.\n");
  430. return (QLA_FUNCTION_FAILED);
  431. }
  432. mrk->entry_type = MARKER_TYPE;
  433. mrk->modifier = type;
  434. if (type != MK_SYNC_ALL) {
  435. if (IS_FWI2_CAPABLE(ha)) {
  436. mrk24 = (struct mrk_entry_24xx *) mrk;
  437. mrk24->nport_handle = cpu_to_le16(loop_id);
  438. mrk24->lun[1] = LSB(lun);
  439. mrk24->lun[2] = MSB(lun);
  440. host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
  441. mrk24->vp_index = vha->vp_idx;
  442. mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
  443. } else {
  444. SET_TARGET_ID(ha, mrk->target, loop_id);
  445. mrk->lun = cpu_to_le16(lun);
  446. }
  447. }
  448. wmb();
  449. qla2x00_isp_cmd(vha, req);
  450. return (QLA_SUCCESS);
  451. }
  452. int
  453. qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
  454. struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
  455. uint8_t type)
  456. {
  457. int ret;
  458. unsigned long flags = 0;
  459. spin_lock_irqsave(&vha->hw->hardware_lock, flags);
  460. ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
  461. spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
  462. return (ret);
  463. }
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
  470. static void
  471. qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
  472. {
  473. struct qla_hw_data *ha = vha->hw;
  474. device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
  475. struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
  476. ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
  477. "IOCB data:\n");
  478. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
  479. (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
  480. /* Adjust ring index. */
  481. req->ring_index++;
  482. if (req->ring_index == req->length) {
  483. req->ring_index = 0;
  484. req->ring_ptr = req->ring;
  485. } else
  486. req->ring_ptr++;
  487. /* Set chip new ring index. */
  488. if (IS_QLA82XX(ha)) {
  489. uint32_t dbval = 0x04 | (ha->portnum << 5);
  490. /* write, read and verify logic */
  491. dbval = dbval | (req->id << 8) | (req->ring_index << 16);
  492. if (ql2xdbwr)
  493. qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
  494. else {
  495. WRT_REG_DWORD(
  496. (unsigned long __iomem *)ha->nxdb_wr_ptr,
  497. dbval);
  498. wmb();
  499. while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
  500. WRT_REG_DWORD((unsigned long __iomem *)
  501. ha->nxdb_wr_ptr, dbval);
  502. wmb();
  503. }
  504. }
  505. } else if (ha->mqenable) {
  506. /* Set chip new ring index. */
  507. WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
  508. RD_REG_DWORD(&ioreg->hccr);
  509. } else {
  510. if (IS_FWI2_CAPABLE(ha)) {
  511. WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
  512. RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
  513. } else {
  514. WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
  515. req->ring_index);
  516. RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
  517. }
  518. }
  519. }
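/*
 * Note on the ISP82xx branch above: the new ring index is posted through
 * a doorbell value encoding the port number, queue id and ring index.
 * When ql2xdbwr is not set, the doorbell write is repeated until reading
 * the doorbell read pointer returns the same value, which is the
 * "write, read and verify" logic mentioned in the code.
 */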
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
  528. inline uint16_t
  529. qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
  530. {
  531. uint16_t iocbs;
  532. iocbs = 1;
  533. if (dsds > 1) {
  534. iocbs += (dsds - 1) / 5;
  535. if ((dsds - 1) % 5)
  536. iocbs++;
  537. }
  538. return iocbs;
  539. }
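/*
 * For the FWI2 path the command IOCB itself carries a single DSD and each
 * Continuation Type 1 IOCB carries five more, hence
 * iocbs = 1 + ceil((dsds - 1) / 5): e.g. dsds = 6 fits in two entries,
 * dsds = 7 requires three.
 */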
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
  548. inline void
  549. qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
  550. uint16_t tot_dsds)
  551. {
  552. uint16_t avail_dsds;
  553. uint32_t *cur_dsd;
  554. scsi_qla_host_t *vha;
  555. struct scsi_cmnd *cmd;
  556. struct scatterlist *sg;
  557. int i;
  558. struct req_que *req;
  559. cmd = sp->cmd;
/* Update entry type to indicate Command Type 7 IOCB */
  561. *((uint32_t *)(&cmd_pkt->entry_type)) =
  562. __constant_cpu_to_le32(COMMAND_TYPE_7);
  563. /* No data transfer */
  564. if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
  565. cmd_pkt->byte_count = __constant_cpu_to_le32(0);
  566. return;
  567. }
  568. vha = sp->fcport->vha;
  569. req = vha->req;
  570. /* Set transfer direction */
  571. if (cmd->sc_data_direction == DMA_TO_DEVICE) {
  572. cmd_pkt->task_mgmt_flags =
  573. __constant_cpu_to_le16(TMF_WRITE_DATA);
  574. sp->fcport->vha->hw->qla_stats.output_bytes +=
  575. scsi_bufflen(sp->cmd);
  576. } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
  577. cmd_pkt->task_mgmt_flags =
  578. __constant_cpu_to_le16(TMF_READ_DATA);
  579. sp->fcport->vha->hw->qla_stats.input_bytes +=
  580. scsi_bufflen(sp->cmd);
  581. }
/* One DSD is available in the Command Type 7 IOCB */
  583. avail_dsds = 1;
  584. cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
  585. /* Load data segments */
  586. scsi_for_each_sg(cmd, sg, tot_dsds, i) {
  587. dma_addr_t sle_dma;
  588. cont_a64_entry_t *cont_pkt;
  589. /* Allocate additional continuation packets? */
  590. if (avail_dsds == 0) {
  591. /*
  592. * Five DSDs are available in the Continuation
  593. * Type 1 IOCB.
  594. */
  595. cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
  596. cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
  597. avail_dsds = 5;
  598. }
  599. sle_dma = sg_dma_address(sg);
  600. *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
  601. *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
  602. *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
  603. avail_dsds--;
  604. }
  605. }
  606. struct fw_dif_context {
  607. uint32_t ref_tag;
  608. uint16_t app_tag;
  609. uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
  610. uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
  611. };
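/*
 * Background for the DIF code below: each T10-DIF protection interval
 * carries an 8-byte tuple of 16-bit guard (CRC), 16-bit application tag
 * and 32-bit reference tag.  The mask bytes in struct fw_dif_context
 * select which bytes of the app/ref tags the firmware validates or
 * replaces; all-0xff masks check every byte, all-zero masks disable the
 * check for that tag.
 */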
/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
  616. static inline void
  617. qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
  618. unsigned int protcnt)
  619. {
  620. struct scsi_cmnd *cmd = sp->cmd;
  621. scsi_qla_host_t *vha = shost_priv(cmd->device->host);
  622. switch (scsi_get_prot_type(cmd)) {
  623. case SCSI_PROT_DIF_TYPE0:
  624. /*
  625. * No check for ql2xenablehba_err_chk, as it would be an
  626. * I/O error if hba tag generation is not done.
  627. */
  628. pkt->ref_tag = cpu_to_le32((uint32_t)
  629. (0xffffffff & scsi_get_lba(cmd)));
  630. if (!qla2x00_hba_err_chk_enabled(sp))
  631. break;
  632. pkt->ref_tag_mask[0] = 0xff;
  633. pkt->ref_tag_mask[1] = 0xff;
  634. pkt->ref_tag_mask[2] = 0xff;
  635. pkt->ref_tag_mask[3] = 0xff;
  636. break;
  637. /*
  638. * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
  639. * match LBA in CDB + N
  640. */
  641. case SCSI_PROT_DIF_TYPE2:
  642. pkt->app_tag = __constant_cpu_to_le16(0);
  643. pkt->app_tag_mask[0] = 0x0;
  644. pkt->app_tag_mask[1] = 0x0;
  645. pkt->ref_tag = cpu_to_le32((uint32_t)
  646. (0xffffffff & scsi_get_lba(cmd)));
  647. if (!qla2x00_hba_err_chk_enabled(sp))
  648. break;
  649. /* enable ALL bytes of the ref tag */
  650. pkt->ref_tag_mask[0] = 0xff;
  651. pkt->ref_tag_mask[1] = 0xff;
  652. pkt->ref_tag_mask[2] = 0xff;
  653. pkt->ref_tag_mask[3] = 0xff;
  654. break;
  655. /* For Type 3 protection: 16 bit GUARD only */
  656. case SCSI_PROT_DIF_TYPE3:
  657. pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
  658. pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
  659. 0x00;
  660. break;
/*
 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
 * 16 bit app tag.
 */
  665. case SCSI_PROT_DIF_TYPE1:
  666. pkt->ref_tag = cpu_to_le32((uint32_t)
  667. (0xffffffff & scsi_get_lba(cmd)));
  668. pkt->app_tag = __constant_cpu_to_le16(0);
  669. pkt->app_tag_mask[0] = 0x0;
  670. pkt->app_tag_mask[1] = 0x0;
  671. if (!qla2x00_hba_err_chk_enabled(sp))
  672. break;
  673. /* enable ALL bytes of the ref tag */
  674. pkt->ref_tag_mask[0] = 0xff;
  675. pkt->ref_tag_mask[1] = 0xff;
  676. pkt->ref_tag_mask[2] = 0xff;
  677. pkt->ref_tag_mask[3] = 0xff;
  678. break;
  679. }
  680. ql_dbg(ql_dbg_io, vha, 0x3009,
  681. "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
  682. "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
  683. pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
  684. scsi_get_prot_type(cmd), cmd);
  685. }
  686. struct qla2_sgx {
  687. dma_addr_t dma_addr; /* OUT */
  688. uint32_t dma_len; /* OUT */
  689. uint32_t tot_bytes; /* IN */
  690. struct scatterlist *cur_sg; /* IN */
  691. /* for book keeping, bzero on initial invocation */
  692. uint32_t bytes_consumed;
  693. uint32_t num_bytes;
  694. uint32_t tot_partial;
  695. /* for debugging */
  696. uint32_t num_sg;
  697. srb_t *sp;
  698. };
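/*
 * struct qla2_sgx and qla24xx_get_one_block_sg() below form a small
 * iterator that re-slices the data scatter/gather list into
 * protection-interval (sector) sized pieces: each call yields one DMA
 * address/length chunk, sets *partial when a scatter/gather element ends
 * mid-interval, and returns 0 once tot_bytes have been consumed.
 */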
  699. static int
  700. qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
  701. uint32_t *partial)
  702. {
  703. struct scatterlist *sg;
  704. uint32_t cumulative_partial, sg_len;
  705. dma_addr_t sg_dma_addr;
  706. if (sgx->num_bytes == sgx->tot_bytes)
  707. return 0;
  708. sg = sgx->cur_sg;
  709. cumulative_partial = sgx->tot_partial;
  710. sg_dma_addr = sg_dma_address(sg);
  711. sg_len = sg_dma_len(sg);
  712. sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
  713. if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
  714. sgx->dma_len = (blk_sz - cumulative_partial);
  715. sgx->tot_partial = 0;
  716. sgx->num_bytes += blk_sz;
  717. *partial = 0;
  718. } else {
  719. sgx->dma_len = sg_len - sgx->bytes_consumed;
  720. sgx->tot_partial += sgx->dma_len;
  721. *partial = 1;
  722. }
  723. sgx->bytes_consumed += sgx->dma_len;
  724. if (sg_len == sgx->bytes_consumed) {
  725. sg = sg_next(sg);
  726. sgx->num_sg++;
  727. sgx->cur_sg = sg;
  728. sgx->bytes_consumed = 0;
  729. }
  730. return 1;
  731. }
  732. static int
  733. qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
  734. uint32_t *dsd, uint16_t tot_dsds)
  735. {
  736. void *next_dsd;
  737. uint8_t avail_dsds = 0;
  738. uint32_t dsd_list_len;
  739. struct dsd_dma *dsd_ptr;
  740. struct scatterlist *sg_prot;
  741. uint32_t *cur_dsd = dsd;
  742. uint16_t used_dsds = tot_dsds;
  743. uint32_t prot_int;
  744. uint32_t partial;
  745. struct qla2_sgx sgx;
  746. dma_addr_t sle_dma;
  747. uint32_t sle_dma_len, tot_prot_dma_len = 0;
  748. struct scsi_cmnd *cmd = sp->cmd;
  749. prot_int = cmd->device->sector_size;
  750. memset(&sgx, 0, sizeof(struct qla2_sgx));
  751. sgx.tot_bytes = scsi_bufflen(sp->cmd);
  752. sgx.cur_sg = scsi_sglist(sp->cmd);
  753. sgx.sp = sp;
  754. sg_prot = scsi_prot_sglist(sp->cmd);
  755. while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
  756. sle_dma = sgx.dma_addr;
  757. sle_dma_len = sgx.dma_len;
  758. alloc_and_fill:
  759. /* Allocate additional continuation packets? */
  760. if (avail_dsds == 0) {
  761. avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
  762. QLA_DSDS_PER_IOCB : used_dsds;
  763. dsd_list_len = (avail_dsds + 1) * 12;
  764. used_dsds -= avail_dsds;
  765. /* allocate tracking DS */
  766. dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
  767. if (!dsd_ptr)
  768. return 1;
  769. /* allocate new list */
  770. dsd_ptr->dsd_addr = next_dsd =
  771. dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
  772. &dsd_ptr->dsd_list_dma);
  773. if (!next_dsd) {
  774. /*
  775. * Need to cleanup only this dsd_ptr, rest
  776. * will be done by sp_free_dma()
  777. */
  778. kfree(dsd_ptr);
  779. return 1;
  780. }
  781. list_add_tail(&dsd_ptr->list,
  782. &((struct crc_context *)sp->ctx)->dsd_list);
  783. sp->flags |= SRB_CRC_CTX_DSD_VALID;
  784. /* add new list to cmd iocb or last list */
  785. *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
  786. *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
  787. *cur_dsd++ = dsd_list_len;
  788. cur_dsd = (uint32_t *)next_dsd;
  789. }
  790. *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
  791. *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
  792. *cur_dsd++ = cpu_to_le32(sle_dma_len);
  793. avail_dsds--;
  794. if (partial == 0) {
  795. /* Got a full protection interval */
  796. sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
  797. sle_dma_len = 8;
  798. tot_prot_dma_len += sle_dma_len;
  799. if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
  800. tot_prot_dma_len = 0;
  801. sg_prot = sg_next(sg_prot);
  802. }
  803. partial = 1; /* So as to not re-enter this block */
  804. goto alloc_and_fill;
  805. }
  806. }
  807. /* Null termination */
  808. *cur_dsd++ = 0;
  809. *cur_dsd++ = 0;
  810. *cur_dsd++ = 0;
  811. return 0;
  812. }
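/*
 * In the non-bundled case handled above, every completed protection
 * interval of user data is followed in the DSD stream by an 8-byte slice
 * of the protection scatter/gather list, so data and DIF tuples end up
 * interleaved.  DSD lists are chained through 12-byte
 * (address low, address high, length) descriptors allocated from
 * dl_dma_pool and tracked on the crc_context dsd_list for later cleanup.
 */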
  813. static int
  814. qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
  815. uint16_t tot_dsds)
  816. {
  817. void *next_dsd;
  818. uint8_t avail_dsds = 0;
  819. uint32_t dsd_list_len;
  820. struct dsd_dma *dsd_ptr;
  821. struct scatterlist *sg;
  822. uint32_t *cur_dsd = dsd;
  823. int i;
  824. uint16_t used_dsds = tot_dsds;
  825. scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
  826. uint8_t *cp;
  827. scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
  828. dma_addr_t sle_dma;
  829. /* Allocate additional continuation packets? */
  830. if (avail_dsds == 0) {
  831. avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
  832. QLA_DSDS_PER_IOCB : used_dsds;
  833. dsd_list_len = (avail_dsds + 1) * 12;
  834. used_dsds -= avail_dsds;
  835. /* allocate tracking DS */
  836. dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
  837. if (!dsd_ptr)
  838. return 1;
  839. /* allocate new list */
  840. dsd_ptr->dsd_addr = next_dsd =
  841. dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
  842. &dsd_ptr->dsd_list_dma);
  843. if (!next_dsd) {
  844. /*
  845. * Need to cleanup only this dsd_ptr, rest
  846. * will be done by sp_free_dma()
  847. */
  848. kfree(dsd_ptr);
  849. return 1;
  850. }
  851. list_add_tail(&dsd_ptr->list,
  852. &((struct crc_context *)sp->ctx)->dsd_list);
  853. sp->flags |= SRB_CRC_CTX_DSD_VALID;
  854. /* add new list to cmd iocb or last list */
  855. *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
  856. *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
  857. *cur_dsd++ = dsd_list_len;
  858. cur_dsd = (uint32_t *)next_dsd;
  859. }
  860. sle_dma = sg_dma_address(sg);
ql_dbg(ql_dbg_io, vha, 0x300a,
    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
    sp->cmd);
  865. *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
  866. *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
  867. *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
  868. avail_dsds--;
  869. if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
  870. cp = page_address(sg_page(sg)) + sg->offset;
  871. ql_dbg(ql_dbg_io, vha, 0x300b,
  872. "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
  873. }
  874. }
  875. /* Null termination */
  876. *cur_dsd++ = 0;
  877. *cur_dsd++ = 0;
  878. *cur_dsd++ = 0;
  879. return 0;
  880. }
  881. static int
  882. qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
  883. uint32_t *dsd,
  884. uint16_t tot_dsds)
  885. {
  886. void *next_dsd;
  887. uint8_t avail_dsds = 0;
  888. uint32_t dsd_list_len;
  889. struct dsd_dma *dsd_ptr;
  890. struct scatterlist *sg;
  891. int i;
  892. struct scsi_cmnd *cmd;
  893. uint32_t *cur_dsd = dsd;
  894. uint16_t used_dsds = tot_dsds;
  895. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  896. uint8_t *cp;
  897. cmd = sp->cmd;
  898. scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
  899. dma_addr_t sle_dma;
  900. /* Allocate additional continuation packets? */
  901. if (avail_dsds == 0) {
  902. avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
  903. QLA_DSDS_PER_IOCB : used_dsds;
  904. dsd_list_len = (avail_dsds + 1) * 12;
  905. used_dsds -= avail_dsds;
  906. /* allocate tracking DS */
  907. dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
  908. if (!dsd_ptr)
  909. return 1;
  910. /* allocate new list */
  911. dsd_ptr->dsd_addr = next_dsd =
  912. dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
  913. &dsd_ptr->dsd_list_dma);
  914. if (!next_dsd) {
  915. /*
  916. * Need to cleanup only this dsd_ptr, rest
  917. * will be done by sp_free_dma()
  918. */
  919. kfree(dsd_ptr);
  920. return 1;
  921. }
  922. list_add_tail(&dsd_ptr->list,
  923. &((struct crc_context *)sp->ctx)->dsd_list);
  924. sp->flags |= SRB_CRC_CTX_DSD_VALID;
  925. /* add new list to cmd iocb or last list */
  926. *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
  927. *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
  928. *cur_dsd++ = dsd_list_len;
  929. cur_dsd = (uint32_t *)next_dsd;
  930. }
  931. sle_dma = sg_dma_address(sg);
  932. if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
  933. ql_dbg(ql_dbg_io, vha, 0x3027,
  934. "%s(): %p, sg_entry %d - "
  935. "addr=0x%x0x%x, len=%d.\n",
  936. __func__, cur_dsd, i,
  937. LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
  938. }
  939. *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
  940. *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
  941. *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
  942. if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
  943. cp = page_address(sg_page(sg)) + sg->offset;
  944. ql_dbg(ql_dbg_io, vha, 0x3028,
  945. "%s(): Protection Data buffer = %p.\n", __func__,
  946. cp);
  947. }
  948. avail_dsds--;
  949. }
  950. /* Null termination */
  951. *cur_dsd++ = 0;
  952. *cur_dsd++ = 0;
  953. *cur_dsd++ = 0;
  954. return 0;
  955. }
  956. /**
  957. * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
  958. * Type 6 IOCB types.
  959. *
  960. * @sp: SRB command to process
  961. * @cmd_pkt: Command type 3 IOCB
  962. * @tot_dsds: Total number of segments to transfer
  963. */
  964. static inline int
  965. qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
  966. uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
  967. {
  968. uint32_t *cur_dsd, *fcp_dl;
  969. scsi_qla_host_t *vha;
  970. struct scsi_cmnd *cmd;
  971. struct scatterlist *cur_seg;
  972. int sgc;
  973. uint32_t total_bytes = 0;
  974. uint32_t data_bytes;
  975. uint32_t dif_bytes;
  976. uint8_t bundling = 1;
  977. uint16_t blk_size;
  978. uint8_t *clr_ptr;
  979. struct crc_context *crc_ctx_pkt = NULL;
  980. struct qla_hw_data *ha;
  981. uint8_t additional_fcpcdb_len;
  982. uint16_t fcp_cmnd_len;
  983. struct fcp_cmnd *fcp_cmnd;
  984. dma_addr_t crc_ctx_dma;
  985. char tag[2];
  986. cmd = sp->cmd;
  987. sgc = 0;
  988. /* Update entry type to indicate Command Type CRC_2 IOCB */
  989. *((uint32_t *)(&cmd_pkt->entry_type)) =
  990. __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
  991. vha = sp->fcport->vha;
  992. ha = vha->hw;
  993. /* No data transfer */
  994. data_bytes = scsi_bufflen(cmd);
  995. if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
  996. cmd_pkt->byte_count = __constant_cpu_to_le32(0);
  997. return QLA_SUCCESS;
  998. }
  999. cmd_pkt->vp_index = sp->fcport->vp_idx;
  1000. /* Set transfer direction */
  1001. if (cmd->sc_data_direction == DMA_TO_DEVICE) {
  1002. cmd_pkt->control_flags =
  1003. __constant_cpu_to_le16(CF_WRITE_DATA);
  1004. } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
  1005. cmd_pkt->control_flags =
  1006. __constant_cpu_to_le16(CF_READ_DATA);
  1007. }
  1008. if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
  1009. (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
  1010. (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
  1011. (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
  1012. bundling = 0;
  1013. /* Allocate CRC context from global pool */
  1014. crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
  1015. GFP_ATOMIC, &crc_ctx_dma);
  1016. if (!crc_ctx_pkt)
  1017. goto crc_queuing_error;
  1018. /* Zero out CTX area. */
  1019. clr_ptr = (uint8_t *)crc_ctx_pkt;
  1020. memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
  1021. crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
  1022. sp->flags |= SRB_CRC_CTX_DMA_VALID;
  1023. /* Set handle */
  1024. crc_ctx_pkt->handle = cmd_pkt->handle;
  1025. INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
  1026. qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
  1027. &crc_ctx_pkt->ref_tag, tot_prot_dsds);
  1028. cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
  1029. cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
  1030. cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
  1031. /* Determine SCSI command length -- align to 4 byte boundary */
  1032. if (cmd->cmd_len > 16) {
  1033. additional_fcpcdb_len = cmd->cmd_len - 16;
  1034. if ((cmd->cmd_len % 4) != 0) {
  1035. /* SCSI cmd > 16 bytes must be multiple of 4 */
  1036. goto crc_queuing_error;
  1037. }
  1038. fcp_cmnd_len = 12 + cmd->cmd_len + 4;
  1039. } else {
  1040. additional_fcpcdb_len = 0;
  1041. fcp_cmnd_len = 12 + 16 + 4;
  1042. }
  1043. fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
  1044. fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
  1045. if (cmd->sc_data_direction == DMA_TO_DEVICE)
  1046. fcp_cmnd->additional_cdb_len |= 1;
  1047. else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
  1048. fcp_cmnd->additional_cdb_len |= 2;
  1049. int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
  1050. memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
  1051. cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
  1052. cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
  1053. LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
  1054. cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
  1055. MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
  1056. fcp_cmnd->task_management = 0;
  1057. /*
  1058. * Update tagged queuing modifier if using command tag queuing
  1059. */
  1060. if (scsi_populate_tag_msg(cmd, tag)) {
  1061. switch (tag[0]) {
  1062. case HEAD_OF_QUEUE_TAG:
  1063. fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
  1064. break;
  1065. case ORDERED_QUEUE_TAG:
  1066. fcp_cmnd->task_attribute = TSK_ORDERED;
  1067. break;
  1068. default:
  1069. fcp_cmnd->task_attribute = 0;
  1070. break;
  1071. }
  1072. } else {
  1073. fcp_cmnd->task_attribute = 0;
  1074. }
  1075. cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
/* Compute dif len and adjust data len to include protection */
  1077. dif_bytes = 0;
  1078. blk_size = cmd->device->sector_size;
  1079. dif_bytes = (data_bytes / blk_size) * 8;
  1080. switch (scsi_get_prot_op(sp->cmd)) {
  1081. case SCSI_PROT_READ_INSERT:
  1082. case SCSI_PROT_WRITE_STRIP:
  1083. total_bytes = data_bytes;
  1084. data_bytes += dif_bytes;
  1085. break;
  1086. case SCSI_PROT_READ_STRIP:
  1087. case SCSI_PROT_WRITE_INSERT:
  1088. case SCSI_PROT_READ_PASS:
  1089. case SCSI_PROT_WRITE_PASS:
  1090. total_bytes = data_bytes + dif_bytes;
  1091. break;
  1092. default:
  1093. BUG();
  1094. }
  1095. if (!qla2x00_hba_err_chk_enabled(sp))
  1096. fw_prot_opts |= 0x10; /* Disable Guard tag checking */
  1097. if (!bundling) {
  1098. cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
  1099. } else {
/*
 * Configure bundling if we need to fetch interleaving
 * protection PCI accesses.
 */
  1104. fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
  1105. crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
  1106. crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
  1107. tot_prot_dsds);
  1108. cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
  1109. }
  1110. /* Finish the common fields of CRC pkt */
  1111. crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
  1112. crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
  1113. crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
  1114. crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
  1115. /* Fibre channel byte count */
  1116. cmd_pkt->byte_count = cpu_to_le32(total_bytes);
  1117. fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
  1118. additional_fcpcdb_len);
  1119. *fcp_dl = htonl(total_bytes);
  1120. if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
  1121. cmd_pkt->byte_count = __constant_cpu_to_le32(0);
  1122. return QLA_SUCCESS;
  1123. }
  1124. /* Walks data segments */
  1125. cmd_pkt->control_flags |=
  1126. __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
  1127. if (!bundling && tot_prot_dsds) {
  1128. if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
  1129. cur_dsd, tot_dsds))
  1130. goto crc_queuing_error;
  1131. } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
  1132. (tot_dsds - tot_prot_dsds)))
  1133. goto crc_queuing_error;
  1134. if (bundling && tot_prot_dsds) {
  1135. /* Walks dif segments */
  1136. cur_seg = scsi_prot_sglist(cmd);
  1137. cmd_pkt->control_flags |=
  1138. __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
  1139. cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
  1140. if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
  1141. tot_prot_dsds))
  1142. goto crc_queuing_error;
  1143. }
  1144. return QLA_SUCCESS;
  1145. crc_queuing_error:
  1146. /* Cleanup will be performed by the caller */
  1147. return QLA_FUNCTION_FAILED;
  1148. }
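/*
 * Worked example for the DIF byte accounting above: with 512-byte sectors
 * and a 64 KB transfer, dif_bytes = (65536 / 512) * 8 = 1024.  For
 * READ_INSERT/WRITE_STRIP the wire-side count (total_bytes) is the plain
 * data length; for the PASS and the remaining INSERT/STRIP cases it is
 * data plus DIF bytes, and fcp_dl is patched to that total.
 */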
  1149. /**
  1150. * qla24xx_start_scsi() - Send a SCSI command to the ISP
  1151. * @sp: command to send to the ISP
  1152. *
  1153. * Returns non-zero if a failure occurred, else zero.
  1154. */
  1155. int
  1156. qla24xx_start_scsi(srb_t *sp)
  1157. {
  1158. int ret, nseg;
  1159. unsigned long flags;
  1160. uint32_t *clr_ptr;
  1161. uint32_t index;
  1162. uint32_t handle;
  1163. struct cmd_type_7 *cmd_pkt;
  1164. uint16_t cnt;
  1165. uint16_t req_cnt;
  1166. uint16_t tot_dsds;
  1167. struct req_que *req = NULL;
  1168. struct rsp_que *rsp = NULL;
  1169. struct scsi_cmnd *cmd = sp->cmd;
  1170. struct scsi_qla_host *vha = sp->fcport->vha;
  1171. struct qla_hw_data *ha = vha->hw;
  1172. char tag[2];
  1173. /* Setup device pointers. */
  1174. ret = 0;
  1175. qla25xx_set_que(sp, &rsp);
  1176. req = vha->req;
  1177. /* So we know we haven't pci_map'ed anything yet */
  1178. tot_dsds = 0;
  1179. /* Send marker if required */
  1180. if (vha->marker_needed != 0) {
  1181. if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
  1182. QLA_SUCCESS)
  1183. return QLA_FUNCTION_FAILED;
  1184. vha->marker_needed = 0;
  1185. }
  1186. /* Acquire ring specific lock */
  1187. spin_lock_irqsave(&ha->hardware_lock, flags);
  1188. /* Check for room in outstanding command list. */
  1189. handle = req->current_outstanding_cmd;
  1190. for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
  1191. handle++;
  1192. if (handle == MAX_OUTSTANDING_COMMANDS)
  1193. handle = 1;
  1194. if (!req->outstanding_cmds[handle])
  1195. break;
  1196. }
  1197. if (index == MAX_OUTSTANDING_COMMANDS) {
  1198. goto queuing_error;
  1199. }
  1200. /* Map the sg table so we have an accurate count of sg entries needed */
  1201. if (scsi_sg_count(cmd)) {
  1202. nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
  1203. scsi_sg_count(cmd), cmd->sc_data_direction);
  1204. if (unlikely(!nseg))
  1205. goto queuing_error;
  1206. } else
  1207. nseg = 0;
  1208. tot_dsds = nseg;
  1209. req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
  1210. if (req->cnt < (req_cnt + 2)) {
  1211. cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
  1212. if (req->ring_index < cnt)
  1213. req->cnt = cnt - req->ring_index;
  1214. else
  1215. req->cnt = req->length -
  1216. (req->ring_index - cnt);
  1217. }
  1218. if (req->cnt < (req_cnt + 2))
  1219. goto queuing_error;
  1220. /* Build command packet. */
  1221. req->current_outstanding_cmd = handle;
  1222. req->outstanding_cmds[handle] = sp;
  1223. sp->handle = handle;
  1224. sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
  1225. req->cnt -= req_cnt;
  1226. cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
  1227. cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
  1228. /* Zero out remaining portion of packet. */
  1229. /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
  1230. clr_ptr = (uint32_t *)cmd_pkt + 2;
  1231. memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
  1232. cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
  1233. /* Set NPORT-ID and LUN number*/
  1234. cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
  1235. cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
  1236. cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
  1237. cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
  1238. cmd_pkt->vp_index = sp->fcport->vp_idx;
  1239. int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
  1240. host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
  1241. /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
  1242. if (scsi_populate_tag_msg(cmd, tag)) {
  1243. switch (tag[0]) {
  1244. case HEAD_OF_QUEUE_TAG:
  1245. cmd_pkt->task = TSK_HEAD_OF_QUEUE;
  1246. break;
  1247. case ORDERED_QUEUE_TAG:
  1248. cmd_pkt->task = TSK_ORDERED;
  1249. break;
  1250. }
  1251. }
  1252. /* Load SCSI command packet. */
  1253. memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
  1254. host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
  1255. cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
  1256. /* Build IOCB segments */
  1257. qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
  1258. /* Set total data segment count. */
  1259. cmd_pkt->entry_count = (uint8_t)req_cnt;
  1260. /* Specify response queue number where completion should happen */
  1261. cmd_pkt->entry_status = (uint8_t) rsp->id;
  1262. wmb();
  1263. /* Adjust ring index. */
  1264. req->ring_index++;
  1265. if (req->ring_index == req->length) {
  1266. req->ring_index = 0;
  1267. req->ring_ptr = req->ring;
  1268. } else
  1269. req->ring_ptr++;
  1270. sp->flags |= SRB_DMA_VALID;
  1271. /* Set chip new ring index. */
  1272. WRT_REG_DWORD(req->req_q_in, req->ring_index);
  1273. RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
  1274. /* Manage unprocessed RIO/ZIO commands in response queue. */
  1275. if (vha->flags.process_response_queue &&
  1276. rsp->ring_ptr->signature != RESPONSE_PROCESSED)
  1277. qla24xx_process_response_queue(vha, rsp);
  1278. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1279. return QLA_SUCCESS;
  1280. queuing_error:
  1281. if (tot_dsds)
  1282. scsi_dma_unmap(cmd);
  1283. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1284. return QLA_FUNCTION_FAILED;
  1285. }
  1286. /**
  1287. * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
  1288. * @sp: command to send to the ISP
  1289. *
  1290. * Returns non-zero if a failure occurred, else zero.
  1291. */
  1292. int
qla24xx_dif_start_scsi(srb_t *sp)
{
        int nseg;
        unsigned long flags;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        uint16_t cnt;
        uint16_t req_cnt = 0;
        uint16_t tot_dsds;
        uint16_t tot_prot_dsds;
        uint16_t fw_prot_opts = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = sp->cmd;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct cmd_type_crc_2 *cmd_pkt;
        uint32_t status = 0;

#define QDSS_GOT_Q_SPACE        BIT_0

        /* Only process protection or >16 cdb in this routine */
        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
                if (cmd->cmd_len <= 16)
                        return qla24xx_start_scsi(sp);
        }

        /* Setup device pointers. */
        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Compute number of required data segments */
        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
                else
                        sp->flags |= SRB_DMA_VALID;

                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
                        struct qla2_sgx sgx;
                        uint32_t partial;

                        memset(&sgx, 0, sizeof(struct qla2_sgx));
                        sgx.tot_bytes = scsi_bufflen(cmd);
                        sgx.cur_sg = scsi_sglist(cmd);
                        sgx.sp = sp;

                        nseg = 0;
                        while (qla24xx_get_one_block_sg(
                            cmd->device->sector_size, &sgx, &partial))
                                nseg++;
                }
        } else
                nseg = 0;

        /* number of required data segments */
        tot_dsds = nseg;

        /* Compute number of required protection segments */
        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
                else
                        sp->flags |= SRB_CRC_PROT_DMA_VALID;

                if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
                    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
                        nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
                }
        } else {
                nseg = 0;
        }

        req_cnt = 1;
        /* Total Data and protection sg segment(s) */
        tot_prot_dsds = nseg;
        tot_dsds += nseg;
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }

        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        status |= QDSS_GOT_Q_SPACE;

        /* Build header part of command packet (excluding the OPCODE). */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        /* Fill-in common area */
        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Total Data and protection segment(s) */
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Build IOCB segments and adjust for data protection segments */
        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
            QLA_SUCCESS)
                goto queuing_error;

        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where completion should happen */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        cmd_pkt->timeout = __constant_cpu_to_le16(0);
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_SUCCESS;

queuing_error:
        if (status & QDSS_GOT_Q_SPACE) {
                req->outstanding_cmds[handle] = NULL;
                req->cnt += req_cnt;
        }
        /* Cleanup will be performed by the caller (queuecommand) */

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_FUNCTION_FAILED;
}
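
/*
 * qla25xx_set_que() - Select the response queue for a command.
 *
 * When CPU affinity is enabled and the submitting CPU maps to one of the
 * extra response queues, point *rsp at rsp_q_map[cpu + 1]; otherwise fall
 * back to the default response queue (rsp_q_map[0]).
 */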
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
        struct scsi_cmnd *cmd = sp->cmd;
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        int affinity = cmd->request->cpu;

        if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
            affinity < ha->max_rsp_queues - 1)
                *rsp = ha->rsp_q_map[affinity + 1];
        else
                *rsp = ha->rsp_q_map[0];
}

/* Generic Control-SRB manipulation functions. */
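/*
 * qla2x00_alloc_iocbs() - Reserve a request-queue entry for a control SRB.
 *
 * If @sp is supplied, an outstanding-command handle is reserved first.
 * Space on the request ring is then checked (re-reading the out pointer
 * appropriate to the chip family) and, if available, the ring entry is
 * zeroed and its entry_count/handle filled in.
 *
 * Returns a pointer to the prepared request entry, or NULL if no handle
 * or ring space is available.  Called with the hardware lock held.
 */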
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        uint32_t index, handle;
        request_t *pkt;
        uint16_t cnt, req_cnt;

        pkt = NULL;
        req_cnt = 1;
        handle = 0;

        if (!sp)
                goto skip_cmd_array;

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS) {
                ql_log(ql_log_warn, vha, 0x700b,
                    "No room on outstanding cmd array.\n");
                goto queuing_error;
        }

        /* Prep command array. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;

skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt) {
                if (ha->mqenable)
                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
                else if (IS_QLA82XX(ha))
                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
                else if (IS_FWI2_CAPABLE(ha))
                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
                else
                        cnt = qla2x00_debounce_register(
                            ISP_REQ_Q_OUT(ha, &reg->isp));

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < req_cnt)
                goto queuing_error;

        /* Prep packet */
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
        memset(pkt, 0, REQUEST_ENTRY_SIZE);
        pkt->entry_count = req_cnt;
        pkt->handle = handle;

queuing_error:
        return pkt;
}
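
/*
 * qla2x00_start_iocbs() - Hand the queued request entry to the firmware.
 *
 * Advance the request-ring index (wrapping at the end of the ring) and
 * notify the ISP through the register set that matches the chip:
 * multiqueue, ISP82xx, FWI2-capable, or legacy request-queue-in register.
 */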
static void
qla2x00_start_iocbs(srb_t *sp)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        struct req_que *req = ha->req_q_map[0];
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        if (IS_QLA82XX(ha)) {
                qla82xx_start_iocbs(sp);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable) {
                        WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                        RD_REG_DWORD(&ioreg->hccr);
                } else if (IS_QLA82XX(ha)) {
                        qla82xx_start_iocbs(sp);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                            req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}
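
/*
 * qla24xx_login_iocb() - Format a PLOGI Login/Logout Port IOCB
 * for FWI2-capable ISPs.  The conditional-PLOGI and skip-PRLI control
 * flags are carried over from the SRB's logio flags.
 */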
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *lio = ctx->u.iocb_cmd;

        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
                logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
                logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
        logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *lio = ctx->u.iocb_cmd;
        uint16_t opts;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
        if (HAS_EXTENDED_IDS(ha)) {
                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
                mbx->mb10 = cpu_to_le16(opts);
        } else {
                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
        }
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
        mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags =
            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
        logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
            cpu_to_le16(sp->fcport->loop_id):
            cpu_to_le16(sp->fcport->loop_id << 8);
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
        mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
        /* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
        if (HAS_EXTENDED_IDS(ha)) {
                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
                mbx->mb10 = cpu_to_le16(BIT_0);
        } else {
                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
        }
        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
        mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
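
/*
 * qla24xx_tm_iocb() - Format a Task Management IOCB.
 *
 * The TM flags and LUN come from the SRB's tmf context; the timeout is
 * derived from ha->r_a_tov, and the LUN field is filled in (and byte
 * swapped for FCP) only for a LUN reset.
 */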
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
        uint32_t flags;
        unsigned int lun;
        struct fc_port *fcport = sp->fcport;
        scsi_qla_host_t *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *iocb = ctx->u.iocb_cmd;
        struct req_que *req = vha->req;

        flags = iocb->u.tmf.flags;
        lun = iocb->u.tmf.lun;

        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
        tsk->entry_count = 1;
        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
        tsk->control_flags = cpu_to_le32(flags);
        tsk->port_id[0] = fcport->d_id.b.al_pa;
        tsk->port_id[1] = fcport->d_id.b.area;
        tsk->port_id[2] = fcport->d_id.b.domain;
        tsk->vp_index = fcport->vp_idx;

        if (flags == TCF_LUN_RESET) {
                int_to_scsilun(lun, &tsk->lun);
                host_to_fcp_swap((uint8_t *)&tsk->lun,
                    sizeof(tsk->lun));
        }
}
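
/*
 * qla24xx_els_iocb() - Format an ELS pass-through IOCB from a bsg job.
 *
 * The request and reply payloads each contribute a single DMA segment
 * (the first entry of the respective scatter/gather list); the ELS opcode
 * is taken from the bsg request, depending on whether this is a port or
 * host ELS SRB.
 */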
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

        els_iocb->entry_type = ELS_IOCB_TYPE;
        els_iocb->entry_count = 1;
        els_iocb->sys_define = 0;
        els_iocb->entry_status = 0;
        els_iocb->handle = sp->handle;
        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
        els_iocb->vp_index = sp->fcport->vp_idx;
        els_iocb->sof_type = EST_SOFI3;
        els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

        els_iocb->opcode =
            (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
            bsg_job->request->rqst_data.r_els.els_code :
            bsg_job->request->rqst_data.h_els.command_code;
        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
        els_iocb->control_flags = 0;
        els_iocb->rx_byte_count =
            cpu_to_le32(bsg_job->reply_payload.payload_len);
        els_iocb->tx_byte_count =
            cpu_to_le32(bsg_job->request_payload.payload_len);

        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        els_iocb->tx_len = cpu_to_le32(sg_dma_len
            (bsg_job->request_payload.sg_list));

        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        els_iocb->rx_len = cpu_to_le32(sg_dma_len
            (bsg_job->reply_payload.sg_list));
}
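
/*
 * qla2x00_ct_iocb() - Format a CT pass-through (MS) IOCB for legacy ISPs.
 *
 * The request payload is described by a single DSD; the reply payload
 * scatter/gather list is walked and, when the inline DSD is exhausted,
 * additional Continuation Type 1 IOCBs (five DSDs each) are allocated
 * and counted into entry_count.
 */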
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t tot_dsds;
        scsi_qla_host_t *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iteration = 0;
        int cont_iocb_prsnt = 0;
        int entry_count = 1;

        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
        ct_iocb->entry_type = CT_IOCB_TYPE;
        ct_iocb->entry_status = 0;
        ct_iocb->handle1 = sp->handle;
        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
        ct_iocb->status = __constant_cpu_to_le16(0);
        ct_iocb->control_flags = __constant_cpu_to_le16(0);
        ct_iocb->timeout = 0;
        ct_iocb->cmd_dsd_count =
            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->total_dsd_count =
            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
        ct_iocb->req_bytecount =
            cpu_to_le32(bsg_job->request_payload.payload_len);
        ct_iocb->rsp_bytecount =
            cpu_to_le32(bsg_job->reply_payload.payload_len);

        ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

        ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

        avail_dsds = 1;
        cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
        index = 0;
        tot_dsds = bsg_job->reply_payload.sg_cnt;

        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
                        entry_count++;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
                avail_dsds--;
        }
        ct_iocb->entry_count = entry_count;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t tot_dsds;
        scsi_qla_host_t *vha = sp->fcport->vha;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iteration = 0;
        int cont_iocb_prsnt = 0;
        int entry_count = 1;

        ct_iocb->entry_type = CT_IOCB_TYPE;
        ct_iocb->entry_status = 0;
        ct_iocb->sys_define = 0;
        ct_iocb->handle = sp->handle;

        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        ct_iocb->vp_index = sp->fcport->vp_idx;
        ct_iocb->comp_status = __constant_cpu_to_le16(0);

        ct_iocb->cmd_dsd_count =
            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->timeout = 0;
        ct_iocb->rsp_dsd_count =
            __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
        ct_iocb->rsp_byte_count =
            cpu_to_le32(bsg_job->reply_payload.payload_len);
        ct_iocb->cmd_byte_count =
            cpu_to_le32(bsg_job->request_payload.payload_len);
        ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
            (bsg_job->request_payload.sg_list));

        avail_dsds = 1;
        cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
        index = 0;
        tot_dsds = bsg_job->reply_payload.sg_cnt;

        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
                        entry_count++;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
                avail_dsds--;
        }
        ct_iocb->entry_count = entry_count;
}
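
/*
 * qla2x00_start_sp() - Issue a control SRB.
 *
 * Allocate a request-queue IOCB under the hardware lock, format it
 * according to the SRB context type (login, logout, ELS, CT, ADISC or
 * task management, with the FWI2 or legacy variant as appropriate), and
 * kick the request queue.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED if no IOCB could be
 * allocated.
 */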
int
qla2x00_start_sp(srb_t *sp)
{
        int rval;
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        void *pkt;
        struct srb_ctx *ctx = sp->ctx;
        unsigned long flags;

        rval = QLA_FUNCTION_FAILED;
        spin_lock_irqsave(&ha->hardware_lock, flags);
        pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
        if (!pkt) {
                ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
                    "qla2x00_alloc_iocbs failed.\n");
                goto done;
        }

        rval = QLA_SUCCESS;
        switch (ctx->type) {
        case SRB_LOGIN_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_login_iocb(sp, pkt) :
                    qla2x00_login_iocb(sp, pkt);
                break;
        case SRB_LOGOUT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_logout_iocb(sp, pkt) :
                    qla2x00_logout_iocb(sp, pkt);
                break;
        case SRB_ELS_CMD_RPT:
        case SRB_ELS_CMD_HST:
                qla24xx_els_iocb(sp, pkt);
                break;
        case SRB_CT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_ct_iocb(sp, pkt) :
                    qla2x00_ct_iocb(sp, pkt);
                break;
        case SRB_ADISC_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_adisc_iocb(sp, pkt) :
                    qla2x00_adisc_iocb(sp, pkt);
                break;
        case SRB_TM_CMD:
                qla24xx_tm_iocb(sp, pkt);
                break;
        default:
                break;
        }

        wmb();
        qla2x00_start_iocbs(sp);
done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
}