/*
 * linux/drivers/scsi/esas2r/esas2r_io.c
 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"

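/*
 * Start a new request.  Requests are failed immediately if the adapter is
 * degraded or powering down, or if the target is invalid or missing.
 * Otherwise the request is started now if possible, or placed on the defer
 * queue if a chip reset, flash operation or discovery is pending.
 */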
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	struct esas2r_target *t = NULL;
	struct esas2r_request *startrq = rq;
	unsigned long flags;

	if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
			rq->req_stat = RS_SEL2;
		else
			rq->req_stat = RS_DEGRADED;
	} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
		t = a->targetdb + rq->target_id;

		if (unlikely(t >= a->targetdb_end
			     || !(t->flags & TF_USED))) {
			rq->req_stat = RS_SEL;
		} else {
			/* copy in the target ID. */
			rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);

			/*
			 * Test if we want to report RS_SEL for a missing
			 * target.  Note that if AF_DISC_PENDING is set then
			 * this will go on the defer queue.
			 */
			if (unlikely(t->target_state != TS_PRESENT
				     && !(a->flags & AF_DISC_PENDING)))
				rq->req_stat = RS_SEL;
		}
	}

	if (unlikely(rq->req_stat != RS_PENDING)) {
		esas2r_complete_request(a, rq);
		return;
	}

	esas2r_trace("rq=%p", rq);
	esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);

	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
		esas2r_trace("rq->target_id=%d", rq->target_id);
		esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
	}

	spin_lock_irqsave(&a->queue_lock, flags);

	if (likely(list_empty(&a->defer_list) &&
		   !(a->flags &
		     (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
		esas2r_local_start_request(a, startrq);
	else
		list_add_tail(&startrq->req_list, &a->defer_list);

	spin_unlock_irqrestore(&a->queue_lock, flags);
}

/*
 * Starts the specified request.  All requests have RS_PENDING set when this
 * routine is called.  The caller is usually esas2r_start_request, but
 * esas2r_do_deferred_processes will start requests that were deferred.
 *
 * The caller must ensure that requests can be started.
 *
 * esas2r_start_request will defer a request if there are already requests
 * waiting or there is a chip reset pending.  Once the reset condition
 * clears, esas2r_do_deferred_processes will call this function to start
 * the request.
 *
 * When a request is started, it is placed on the active list and queued to
 * the controller.
 */
void esas2r_local_start_request(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	esas2r_trace_enter();
	esas2r_trace("rq=%p", rq);
	esas2r_trace("rq->vrq:%p", rq->vrq);
	esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);

	if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
		     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
		esas2r_lock_set_flags(&a->flags, AF_FLASHING);

	list_add_tail(&rq->req_list, &a->active_list);
	esas2r_start_vda_request(a, rq);
	esas2r_trace_exit();
}

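/*
 * Queue a request to the firmware.  The next free inbound list element is
 * filled in with the physical address and size of the VDA request, and the
 * new write pointer (with the current toggle bit) is posted to the
 * MU_IN_LIST_WRITE register.
 */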
void esas2r_start_vda_request(struct esas2r_adapter *a,
			      struct esas2r_request *rq)
{
	struct esas2r_inbound_list_source_entry *element;
	u32 dw;

	rq->req_stat = RS_STARTED;

	/*
	 * Calculate the inbound list entry location and the current state of
	 * the toggle bit.
	 */
	a->last_write++;
	if (a->last_write >= a->list_size) {
		a->last_write = 0;

		/* update the toggle bit */
		if (a->flags & AF_COMM_LIST_TOGGLE)
			esas2r_lock_clear_flags(&a->flags,
						AF_COMM_LIST_TOGGLE);
		else
			esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
	}

	element =
		(struct esas2r_inbound_list_source_entry *)
		a->inbound_list_md.virt_addr + a->last_write;

	/* Set the VDA request size if it was never modified */
	if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
		rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));

	element->address = cpu_to_le64(rq->vrq_md->phys_addr);
	element->length = cpu_to_le32(rq->vda_req_sz);

	/* Update the write pointer */
	dw = a->last_write;

	if (a->flags & AF_COMM_LIST_TOGGLE)
		dw |= MU_ILW_TOGGLE;

	esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
	esas2r_trace("dw:%x", dw);
	esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
}

/*
 * Build the scatter/gather list for an I/O request according to the
 * specifications placed in the s/g context.  The caller must initialize
 * the context prior to the first call by calling esas2r_sgc_init().
 */
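/*
 * Typical call sequence (a sketch only; the exact esas2r_sgc_init()
 * arguments and the get_phys_addr callback shown here are illustrative,
 * not taken from this file):
 *
 *	struct esas2r_sg_context sgc;
 *
 *	esas2r_sgc_init(&sgc, a, rq, ...);
 *	sgc.length = transfer_length;
 *	if (!a->build_sgl(a, &sgc)) {
 *		// out of SGL descriptors; call again later with the same
 *		// context to resume where the build left off
 *	}
 */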
bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
			      struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	union atto_vda_req *vrq = rq->vrq;

	while (sgc->length) {
		u32 rem = 0;
		u64 addr;
		u32 len;

		len = (*sgc->get_phys_addr)(sgc, &addr);

		if (unlikely(len == 0))
			return false;

		/* if current length is more than what's left, stop there */
		if (unlikely(len > sgc->length))
			len = sgc->length;

another_entry:
		/* limit to a round number less than the maximum length */
		if (len > SGE_LEN_MAX) {
			/*
			 * Save the remainder of the split.  Whenever we limit
			 * an entry, we come back around to build entries out
			 * of the leftover.  We do this to prevent multiple
			 * calls to the get_phys_addr() function for an SGE
			 * that is too large.
			 */
			rem = len - SGE_LEN_MAX;
			len = SGE_LEN_MAX;
		}

		/* See if we need to allocate a new SGL */
		if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
			u8 sgelen;
			struct esas2r_mem_desc *sgl;

			/*
			 * If no SGLs are available, return failure.  The
			 * caller can call us later with the current context
			 * to pick up here.
			 */
			sgl = esas2r_alloc_sgl(a);

			if (unlikely(sgl == NULL))
				return false;

			/* Calculate the length of the last SGE filled in */
			sgelen = (u8)((u8 *)sgc->sge.a64.curr
				      - (u8 *)sgc->sge.a64.last);

			/*
			 * Copy the last SGE filled in to the first entry of
			 * the new SGL to make room for the chain entry.
			 */
			memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);

			/* Figure out the new curr pointer in the new segment */
			sgc->sge.a64.curr =
				(struct atto_vda_sge *)((u8 *)sgl->virt_addr +
							sgelen);

			/* Set the limit pointer and build the chain entry */
			sgc->sge.a64.limit =
				(struct atto_vda_sge *)((u8 *)sgl->virt_addr
							+ sgl_page_size
							- sizeof(struct atto_vda_sge));
			sgc->sge.a64.last->length = cpu_to_le32(
				SGE_CHAIN | SGE_ADDR_64);
			sgc->sge.a64.last->address =
				cpu_to_le64(sgl->phys_addr);

			/*
			 * Now, if there was a previous chain entry, then
			 * update it to contain the length of this segment
			 * and the size of this chain.  Otherwise this is the
			 * first SGL, so set the chain_offset in the request.
			 */
			if (sgc->sge.a64.chain) {
				sgc->sge.a64.chain->length |=
					cpu_to_le32(
						((u8 *)(sgc->sge.a64.last + 1)
						 - (u8 *)rq->sg_table->virt_addr)
						+ sizeof(struct atto_vda_sge) *
						LOBIT(SGE_CHAIN_SZ));
			} else {
				vrq->scsi.chain_offset = (u8)
					((u8 *)sgc->sge.a64.last - (u8 *)vrq);

				/*
				 * This is the first SGL, so set the
				 * chain_offset and the VDA request size in
				 * the request.
				 */
				rq->vda_req_sz =
					(vrq->scsi.chain_offset +
					 sizeof(struct atto_vda_sge) +
					 3) / sizeof(u32);
			}

			/*
			 * Remember this so when we get a new SGL filled in we
			 * can update the length of this chain entry.
			 */
			sgc->sge.a64.chain = sgc->sge.a64.last;

			/* Now link the new SGL onto the primary request. */
			list_add(&sgl->next_desc, &rq->sg_table_head);
		}

		/* Update last one filled in */
		sgc->sge.a64.last = sgc->sge.a64.curr;

		/* Build the new SGE and update the S/G context */
		sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
		sgc->sge.a64.curr->address = cpu_to_le64(addr);
		sgc->sge.a64.curr++;
		sgc->cur_offset += len;
		sgc->length -= len;

		/*
		 * Check if we previously split an entry.  If so, we have to
		 * pick up where we left off.
		 */
		if (rem) {
			addr += len;
			len = rem;
			rem = 0;
			goto another_entry;
		}
	}

	/* Mark the end of the SGL */
	sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);

	/*
	 * If there was a previous chain entry, update the length to indicate
	 * the length of this last segment.
	 */
	if (sgc->sge.a64.chain) {
		sgc->sge.a64.chain->length |= cpu_to_le32(
			((u8 *)(sgc->sge.a64.curr) -
			 (u8 *)rq->sg_table->virt_addr));
	} else {
		u16 reqsize;

		/*
		 * The entire VDA request was not used, so set the size
		 * of the VDA request to be DMA'd.
		 */
		reqsize =
			((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
			 + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);

		/*
		 * Only update the request size if it is bigger than what is
		 * already there.  We can come in here twice for some
		 * management commands.
		 */
		if (reqsize > rq->vda_req_sz)
			rq->vda_req_sz = reqsize;
	}
	return true;
}

/*
 * Create a PRD list for each I-block consumed by the command.  This routine
 * determines how much data is required from each I-block being consumed
 * by the command.  The first and last I-blocks can be partials and all of
 * the I-blocks in between are for a full I-block of data.
 *
 * The interleave size is used to determine the number of bytes in the 1st
 * I-block; the remaining I-blocks hold what remains of the transfer.
 */
static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
				  struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u64 addr;
	u32 len;
	struct esas2r_mem_desc *sgl;
	u32 numchain = 1;
	u32 rem = 0;

	while (sgc->length) {
		/* Get the next address/length pair */
		len = (*sgc->get_phys_addr)(sgc, &addr);

		if (unlikely(len == 0))
			return false;

		/* If current length is more than what's left, stop there */
		if (unlikely(len > sgc->length))
			len = sgc->length;

another_entry:
		/* Limit to a round number less than the maximum length */
		if (len > PRD_LEN_MAX) {
			/*
			 * Save the remainder of the split.  Whenever we limit
			 * an entry, we come back around to build entries out
			 * of the leftover.  We do this to prevent multiple
			 * calls to the get_phys_addr() function for an SGE
			 * that is too large.
			 */
			rem = len - PRD_LEN_MAX;
			len = PRD_LEN_MAX;
		}

		/* See if we need to allocate a new SGL */
		if (sgc->sge.prd.sge_cnt == 0) {
			if (len == sgc->length) {
				/*
				 * We only have 1 PRD entry left.  It can be
				 * placed where the chain entry would have
				 * gone.
				 */

				/* Build the simple SGE */
				sgc->sge.prd.curr->ctl_len = cpu_to_le32(
					PRD_DATA | len);
				sgc->sge.prd.curr->address = cpu_to_le64(addr);

				/* Adjust length related fields */
				sgc->cur_offset += len;
				sgc->length -= len;

				/* We use the reserved chain entry for data */
				numchain = 0;
				break;
			}

			if (sgc->sge.prd.chain) {
				/*
				 * Fill in the number of entries of the
				 * current SGL in the previous chain entry;
				 * the current SGL may not be full.
				 */
				sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
					sgc->sge.prd.sgl_max_cnt);
			}

			/*
			 * If no SGLs are available, return failure.  The
			 * caller can call us later with the current context
			 * to pick up here.
			 */
			sgl = esas2r_alloc_sgl(a);

			if (unlikely(sgl == NULL))
				return false;

			/*
			 * Link the new SGL onto the chain.
			 * They are in reverse order.
			 */
			list_add(&sgl->next_desc, &rq->sg_table_head);

			/*
			 * An SGL was just filled in and we are starting a
			 * new SGL.  Prime the chain of the ending SGL with
			 * info that points to the new SGL.  The length gets
			 * filled in when the new SGL is filled or ended.
			 */
			sgc->sge.prd.chain = sgc->sge.prd.curr;
			sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
			sgc->sge.prd.chain->address =
				cpu_to_le64(sgl->phys_addr);

			/*
			 * Start a new segment.
			 * Take one away and save for the chain SGE.
			 */
			sgc->sge.prd.curr =
				(struct atto_physical_region_description *)
				sgl->virt_addr;
			sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
		}

		sgc->sge.prd.sge_cnt--;

		/* Build the simple SGE */
		sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
		sgc->sge.prd.curr->address = cpu_to_le64(addr);

		/* Used another element.  Point to the next one */
		sgc->sge.prd.curr++;

		/* Adjust length related fields */
		sgc->cur_offset += len;
		sgc->length -= len;

		/*
		 * Check if we previously split an entry.  If so, we have to
		 * pick up where we left off.
		 */
		if (rem) {
			addr += len;
			len = rem;
			rem = 0;
			goto another_entry;
		}
	}

	if (!list_empty(&rq->sg_table_head)) {
		if (sgc->sge.prd.chain) {
			sgc->sge.prd.chain->ctl_len |=
				cpu_to_le32(sgc->sge.prd.sgl_max_cnt
					    - sgc->sge.prd.sge_cnt
					    - numchain);
		}
	}

	return true;
}

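/*
 * Build the scatter/gather list for an I/O request in PRD format.  For
 * normal reads and writes, the starting LBA is pulled from the CDB and the
 * transfer is split into per-I-block PRD lists, each built by
 * esas2r_build_prd_iblk().  Pass-through and non-I/O requests get a single
 * PRD list covering the entire transfer.
 */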
bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
			      struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u32 len = sgc->length;
	struct esas2r_target *t = a->targetdb + rq->target_id;
	u8 is_i_o = 0;
	u16 reqsize;
	struct atto_physical_region_description *curr_iblk_chn;
	u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];

	/*
	 * Extract the LBA from the command so we can determine
	 * the I-Block boundary.
	 */
	if (rq->vrq->scsi.function == VDA_FUNC_SCSI
	    && t->target_state == TS_PRESENT
	    && !(t->flags & TF_PASS_THRU)) {
		u32 lbalo = 0;

		switch (rq->vrq->scsi.cdb[0]) {
		case READ_16:
		case WRITE_16:
		{
			lbalo = MAKEDWORD(MAKEWORD(cdb[9], cdb[8]),
					  MAKEWORD(cdb[7], cdb[6]));
			is_i_o = 1;
			break;
		}

		case READ_12:
		case WRITE_12:
		case READ_10:
		case WRITE_10:
		{
			lbalo = MAKEDWORD(MAKEWORD(cdb[5], cdb[4]),
					  MAKEWORD(cdb[3], cdb[2]));
			is_i_o = 1;
			break;
		}

		case READ_6:
		case WRITE_6:
		{
			lbalo = MAKEDWORD(MAKEWORD(cdb[3], cdb[2]),
					  MAKEWORD(cdb[1] & 0x1F, 0));
			is_i_o = 1;
			break;
		}

		default:
			break;
		}

		if (is_i_o) {
			u32 startlba;

			rq->vrq->scsi.iblk_cnt_prd = 0;

			/* Determine size of the 1st I-block PRD list */
			startlba = t->inter_block -
				   (lbalo & (t->inter_block - 1));
			sgc->length = startlba * t->block_size;

			/*
			 * Check if the 1st I-block chain starts at the
			 * base of the I-block.
			 */
			if ((lbalo & (t->inter_block - 1)) == 0)
				rq->flags |= RF_1ST_IBLK_BASE;

			if (sgc->length > len)
				sgc->length = len;
		} else {
			sgc->length = len;
		}
	} else {
		sgc->length = len;
	}

	/* get our starting chain address */
	curr_iblk_chn =
		(struct atto_physical_region_description *)sgc->sge.a64.curr;

	sgc->sge.prd.sgl_max_cnt = sgl_page_size /
		sizeof(struct atto_physical_region_description);

	/* create all of the I-block PRD lists */
	while (len) {
		sgc->sge.prd.sge_cnt = 0;
		sgc->sge.prd.chain = NULL;
		sgc->sge.prd.curr = curr_iblk_chn;

		/* increment to the next I-Block */
		len -= sgc->length;

		/* go build the next I-Block PRD list */
		if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
			return false;

		curr_iblk_chn++;

		if (is_i_o) {
			rq->vrq->scsi.iblk_cnt_prd++;

			if (len > t->inter_byte)
				sgc->length = t->inter_byte;
			else
				sgc->length = len;
		}
	}

	/* figure out the size used of the VDA request */
	reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq)) / sizeof(u32);

	/*
	 * Only update the request size if it is bigger than what is
	 * already there.  We can come in here twice for some management
	 * commands.
	 */
	if (reqsize > rq->vda_req_sz)
		rq->vda_req_sz = reqsize;

	return true;
}

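/*
 * Called on each timer tick while a chip reset is pending.  This waits out
 * the quiet time after a reset, retries the reset if the firmware never
 * came back, and otherwise checks the outbound doorbell to determine the
 * firmware API version, selecting the SGE or PRD S/G list format
 * accordingly.
 */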
static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
{
	u32 delta = currtime - a->chip_init_time;

	if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
		/* Wait before accessing registers */
	} else if (delta >= ESAS2R_CHPRST_TIME) {
		/*
		 * The last reset failed so try again.  Reset
		 * processing will give up after three tries.
		 */
		esas2r_local_reset_adapter(a);
	} else {
		/* We can now see if the firmware is ready */
		u32 doorbell;

		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
			esas2r_force_interrupt(a);
		} else {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/* Driver supports API version 0 and 1 */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);

			if (ver == DRBL_FW_VER_0) {
				esas2r_lock_set_flags(&a->flags,
						      AF_CHPRST_DETECTED);
				esas2r_lock_set_flags(&a->flags,
						      AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				esas2r_lock_set_flags(&a->flags,
						      AF_CHPRST_DETECTED);
				esas2r_lock_clear_flags(&a->flags,
							AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				esas2r_local_reset_adapter(a);
			}
		}
	}
}

/*
 * This function must be called once per timer tick.  It counts down the
 * remaining chip uptime, drives pending chip reset and discovery
 * processing, checks the firmware heartbeat, and kicks off any deferred
 * processes.
 */
void esas2r_timer_tick(struct esas2r_adapter *a)
{
	u32 currtime = jiffies_to_msecs(jiffies);
	u32 deltatime = currtime - a->last_tick_time;

	a->last_tick_time = currtime;

	/* count down the uptime */
	if (a->chip_uptime
	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
		if (deltatime >= a->chip_uptime)
			a->chip_uptime = 0;
		else
			a->chip_uptime -= deltatime;
	}

	if (a->flags & AF_CHPRST_PENDING) {
		if (!(a->flags & AF_CHPRST_NEEDED)
		    && !(a->flags & AF_CHPRST_DETECTED))
			esas2r_handle_pending_reset(a, currtime);
	} else {
		if (a->flags & AF_DISC_PENDING)
			esas2r_disc_check_complete(a);

		if (a->flags & AF_HEARTBEAT_ENB) {
			if (a->flags & AF_HEARTBEAT) {
				if ((currtime - a->heartbeat_time) >=
				    ESAS2R_HEARTBEAT_TIME) {
					esas2r_lock_clear_flags(&a->flags,
								AF_HEARTBEAT);
					esas2r_hdebug("heartbeat failed");
					esas2r_log(ESAS2R_LOG_CRIT,
						   "heartbeat failed");
					esas2r_bugon();
					esas2r_local_reset_adapter(a);
				}
			} else {
				esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
				a->heartbeat_time = currtime;
				esas2r_force_interrupt(a);
			}
		}
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}

/*
 * Send the specified task management function to the target and LUN
 * specified in rqaux.  In addition, immediately abort any commands that
 * are queued but not sent to the device according to the rules specified
 * by the task management function.
 */
bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
			   struct esas2r_request *rqaux, u8 task_mgt_func)
{
	u16 targetid = rqaux->target_id;
	u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
	bool ret = false;
	struct esas2r_request *rq;
	struct list_head *next, *element;
	unsigned long flags;
	LIST_HEAD(comp_list);

	esas2r_trace_enter();
	esas2r_trace("rqaux:%p", rqaux);
	esas2r_trace("task_mgt_func:%x", task_mgt_func);
	spin_lock_irqsave(&a->queue_lock, flags);

	/* search the defer queue looking for requests for the device */
	list_for_each_safe(element, next, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->vrq->scsi.function == VDA_FUNC_SCSI
		    && rq->target_id == targetid
		    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
			|| task_mgt_func == 0x20)) { /* target reset */
			/* Found a request affected by the task management */
			if (rq->req_stat == RS_PENDING) {
				/*
				 * The request is pending or waiting.  We can
				 * safely complete the request now.
				 */
				if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
					list_add_tail(&rq->comp_list,
						      &comp_list);
			}
		}
	}

	/* Send the task management request to the firmware */
	rqaux->sense_len = 0;
	rqaux->vrq->scsi.length = 0;
	rqaux->target_id = targetid;
	rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
	memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
	rqaux->vrq->scsi.flags |=
		cpu_to_le32(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));

	if (a->flags & AF_FLASHING) {
		/* Assume success.  If there are active requests, return busy */
		rqaux->req_stat = RS_SUCCESS;

		list_for_each_safe(element, next, &a->active_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);
			if (rq->vrq->scsi.function == VDA_FUNC_SCSI
			    && rq->target_id == targetid
			    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
				|| task_mgt_func == 0x20))  /* target reset */
				rqaux->req_stat = RS_BUSY;
		}

		ret = true;
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);

	if (!(a->flags & AF_FLASHING))
		esas2r_start_request(a, rqaux);

	esas2r_comp_list_drain(a, &comp_list);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);

	esas2r_trace_exit();
	return ret;
}

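/*
 * Request a bus reset.  The reset is flagged here and carried out by the
 * tasklet, unless the adapter is degraded or a chip reset or discovery is
 * already pending.
 */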
void esas2r_reset_bus(struct esas2r_adapter *a)
{
	esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");

	if (!(a->flags & AF_DEGRADED_MODE)
	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
		esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
		esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
		esas2r_lock_set_flags(&a->flags, AF_OS_RESET);

		esas2r_schedule_tasklet(a);
	}
}

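/*
 * Remove an aborted request from the active or defer list and set its
 * completion status.  A request that exceeded its timeout is reported as
 * busy instead, because a chip reset prevented aborting it.
 */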
bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
			  u8 status)
{
	esas2r_trace_enter();
	esas2r_trace("rq:%p", rq);
	list_del_init(&rq->req_list);

	if (rq->timeout > RQ_MAX_TIMEOUT) {
		/*
		 * The request timed out, but we could not abort it because a
		 * chip reset occurred.  Return busy status.
		 */
		rq->req_stat = RS_BUSY;
		esas2r_trace_exit();
		return true;
	}

	rq->req_stat = status;
	esas2r_trace_exit();
	return true;
}