stp_request.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584
  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * BSD LICENSE
  25. *
  26. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27. * All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions
  31. * are met:
  32. *
  33. * * Redistributions of source code must retain the above copyright
  34. * notice, this list of conditions and the following disclaimer.
  35. * * Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions and the following disclaimer in
  37. * the documentation and/or other materials provided with the
  38. * distribution.
  39. * * Neither the name of Intel Corporation nor the names of its
  40. * contributors may be used to endorse or promote products derived
  41. * from this software without specific prior written permission.
  42. *
  43. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54. */
  55. #include <scsi/sas.h>
  56. #include "sas.h"
  57. #include "state_machine.h"
  58. #include "remote_device.h"
  59. #include "stp_request.h"
  60. #include "unsolicited_frame_control.h"
  61. #include "scu_completion_codes.h"
  62. #include "scu_event_codes.h"
  63. #include "scu_task_context.h"
  64. #include "request.h"
  65. /**
  66. * This method is will fill in the SCU Task Context for any type of SATA
  67. * request. This is called from the various SATA constructors.
  68. * @sci_req: The general IO request object which is to be used in
  69. * constructing the SCU task context.
  70. * @task_context: The buffer pointer for the SCU task context which is being
  71. * constructed.
  72. *
  73. * The general io request construction is complete. The buffer assignment for
  74. * the command buffer is complete. none Revisit task context construction to
  75. * determine what is common for SSP/SMP/STP task context structures.
  76. */
  77. static void scu_sata_reqeust_construct_task_context(
  78. struct scic_sds_request *sci_req,
  79. struct scu_task_context *task_context)
  80. {
  81. dma_addr_t dma_addr;
  82. struct scic_sds_controller *controller;
  83. struct scic_sds_remote_device *target_device;
  84. struct scic_sds_port *target_port;
  85. controller = scic_sds_request_get_controller(sci_req);
  86. target_device = scic_sds_request_get_device(sci_req);
  87. target_port = scic_sds_request_get_port(sci_req);
  88. /* Fill in the TC with the its required data */
  89. task_context->abort = 0;
  90. task_context->priority = SCU_TASK_PRIORITY_NORMAL;
  91. task_context->initiator_request = 1;
  92. task_context->connection_rate = target_device->connection_rate;
  93. task_context->protocol_engine_index =
  94. scic_sds_controller_get_protocol_engine_group(controller);
  95. task_context->logical_port_index =
  96. scic_sds_port_get_index(target_port);
  97. task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
  98. task_context->valid = SCU_TASK_CONTEXT_VALID;
  99. task_context->context_type = SCU_TASK_CONTEXT_TYPE;
  100. task_context->remote_node_index =
  101. scic_sds_remote_device_get_index(sci_req->target_device);
  102. task_context->command_code = 0;
  103. task_context->link_layer_control = 0;
  104. task_context->do_not_dma_ssp_good_response = 1;
  105. task_context->strict_ordering = 0;
  106. task_context->control_frame = 0;
  107. task_context->timeout_enable = 0;
  108. task_context->block_guard_enable = 0;
  109. task_context->address_modifier = 0;
  110. task_context->task_phase = 0x01;
  111. task_context->ssp_command_iu_length =
  112. (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
  113. /* Set the first word of the H2D REG FIS */
  114. task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
  115. if (sci_req->was_tag_assigned_by_user) {
  116. /*
  117. * Build the task context now since we have already read
  118. * the data
  119. */
  120. sci_req->post_context =
  121. (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
  122. (scic_sds_controller_get_protocol_engine_group(
  123. controller) <<
  124. SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  125. (scic_sds_port_get_index(target_port) <<
  126. SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
  127. scic_sds_io_tag_get_index(sci_req->io_tag));
  128. } else {
  129. /*
  130. * Build the task context now since we have already read
  131. * the data.
  132. * I/O tag index is not assigned because we have to wait
  133. * until we get a TCi.
  134. */
  135. sci_req->post_context =
  136. (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
  137. (scic_sds_controller_get_protocol_engine_group(
  138. controller) <<
  139. SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  140. (scic_sds_port_get_index(target_port) <<
  141. SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
  142. }
  143. /*
  144. * Copy the physical address for the command buffer to the SCU Task
  145. * Context. We must offset the command buffer by 4 bytes because the
  146. * first 4 bytes are transfered in the body of the TC.
  147. */
  148. dma_addr = scic_io_request_get_dma_addr(sci_req,
  149. ((char *) &sci_req->stp.cmd) +
  150. sizeof(u32));
  151. task_context->command_iu_upper = upper_32_bits(dma_addr);
  152. task_context->command_iu_lower = lower_32_bits(dma_addr);
  153. /* SATA Requests do not have a response buffer */
  154. task_context->response_iu_upper = 0;
  155. task_context->response_iu_lower = 0;
  156. }
/**
 * scic_sds_stp_non_ncq_request_construct() - general construction common
 * to all non-NCQ SATA/STP requests.
 * @sci_req: the request being constructed.
 *
 * Marks the request as using a started substate machine; the specific
 * request constructor is responsible for building that machine.
 */
static void scic_sds_stp_non_ncq_request_construct(
	struct scic_sds_request *sci_req)
{
	sci_req->has_started_substate_machine = true;
}
  169. /**
  170. *
  171. * @sci_req: This parameter specifies the request to be constructed as an
  172. * optimized request.
  173. * @optimized_task_type: This parameter specifies whether the request is to be
  174. * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
  175. * value of 1 indicates NCQ.
  176. *
  177. * This method will perform request construction common to all types of STP
  178. * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
  179. * returns an indication as to whether the construction was successful.
  180. */
  181. static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
  182. u8 optimized_task_type,
  183. u32 len,
  184. enum dma_data_direction dir)
  185. {
  186. struct scu_task_context *task_context = sci_req->task_context_buffer;
  187. /* Build the STP task context structure */
  188. scu_sata_reqeust_construct_task_context(sci_req, task_context);
  189. /* Copy over the SGL elements */
  190. scic_sds_request_build_sgl(sci_req);
  191. /* Copy over the number of bytes to be transfered */
  192. task_context->transfer_length_bytes = len;
  193. if (dir == DMA_TO_DEVICE) {
  194. /*
  195. * The difference between the DMA IN and DMA OUT request task type
  196. * values are consistent with the difference between FPDMA READ
  197. * and FPDMA WRITE values. Add the supplied task type parameter
  198. * to this difference to set the task type properly for this
  199. * DATA OUT (WRITE) case. */
  200. task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
  201. - SCU_TASK_TYPE_DMA_IN);
  202. } else {
  203. /*
  204. * For the DATA IN (READ) case, simply save the supplied
  205. * optimized task type. */
  206. task_context->task_type = optimized_task_type;
  207. }
  208. }
/**
 * scic_sds_stp_ncq_request_construct() - construct an STP NCQ (FPDMA)
 * request and its associated task context data.
 * @sci_req: the request to be constructed.
 * @len: number of bytes to transfer.
 * @dir: DMA data direction for the transfer.
 *
 * Note: despite the FPDMAQ_READ task type passed here, the DATA OUT case
 * is handled inside scic_sds_stp_optimized_request_construct() via @dir.
 *
 * Return: SCI_SUCCESS - currently this method always returns this value.
 */
enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
						   u32 len,
						   enum dma_data_direction dir)
{
	scic_sds_stp_optimized_request_construct(sci_req,
						 SCU_TASK_TYPE_FPDMAQ_READ,
						 len, dir);
	return SCI_SUCCESS;
}
  226. /**
  227. * scu_stp_raw_request_construct_task_context -
  228. * @sci_req: This parameter specifies the STP request object for which to
  229. * construct a RAW command frame task context.
  230. * @task_context: This parameter specifies the SCU specific task context buffer
  231. * to construct.
  232. *
  233. * This method performs the operations common to all SATA/STP requests
  234. * utilizing the raw frame method. none
  235. */
  236. static void scu_stp_raw_request_construct_task_context(
  237. struct scic_sds_stp_request *stp_req,
  238. struct scu_task_context *task_context)
  239. {
  240. struct scic_sds_request *sci_req = to_sci_req(stp_req);
  241. scu_sata_reqeust_construct_task_context(sci_req, task_context);
  242. task_context->control_frame = 0;
  243. task_context->priority = SCU_TASK_PRIORITY_NORMAL;
  244. task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
  245. task_context->type.stp.fis_type = FIS_REGH2D;
  246. task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
  247. }
/**
 * scic_stp_io_request_set_ncq_tag() - program the NCQ tag into the
 * request's STP task context.
 * @req: the request whose task context buffer is updated.
 * @ncq_tag: the NCQ tag value.
 *
 * note: This could be made to return an error to the user if the user
 * attempts to set the NCQ tag in the wrong state.
 */
void scic_stp_io_request_set_ncq_tag(
	struct scic_sds_request *req,
	u16 ncq_tag)
{
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}
  258. /**
  259. *
  260. * @sci_req:
  261. *
  262. * Get the next SGL element from the request. - Check on which SGL element pair
  263. * we are working - if working on SLG pair element A - advance to element B -
  264. * else - check to see if there are more SGL element pairs for this IO request
  265. * - if there are more SGL element pairs - advance to the next pair and return
  266. * element A struct scu_sgl_element*
  267. */
  268. static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
  269. {
  270. struct scu_sgl_element *current_sgl;
  271. struct scic_sds_request *sci_req = to_sci_req(stp_req);
  272. struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
  273. if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
  274. if (pio_sgl->sgl_pair->B.address_lower == 0 &&
  275. pio_sgl->sgl_pair->B.address_upper == 0) {
  276. current_sgl = NULL;
  277. } else {
  278. pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
  279. current_sgl = &pio_sgl->sgl_pair->B;
  280. }
  281. } else {
  282. if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
  283. pio_sgl->sgl_pair->next_pair_upper == 0) {
  284. current_sgl = NULL;
  285. } else {
  286. u64 phys_addr;
  287. phys_addr = pio_sgl->sgl_pair->next_pair_upper;
  288. phys_addr <<= 32;
  289. phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
  290. pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
  291. pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
  292. current_sgl = &pio_sgl->sgl_pair->A;
  293. }
  294. }
  295. return current_sgl;
  296. }
  297. /**
  298. *
  299. * @sci_req:
  300. * @completion_code:
  301. *
  302. * This method processes a TC completion. The expected TC completion is for
  303. * the transmission of the H2D register FIS containing the SATA/STP non-data
  304. * request. This method always successfully processes the TC completion.
  305. * SCI_SUCCESS This value is always returned.
  306. */
  307. static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
  308. struct scic_sds_request *sci_req,
  309. u32 completion_code)
  310. {
  311. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  312. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  313. scic_sds_request_set_status(
  314. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  315. );
  316. sci_base_state_machine_change_state(
  317. &sci_req->started_substate_machine,
  318. SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
  319. );
  320. break;
  321. default:
  322. /*
  323. * All other completion status cause the IO to be complete. If a NAK
  324. * was received, then it is up to the user to retry the request. */
  325. scic_sds_request_set_status(
  326. sci_req,
  327. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  328. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  329. );
  330. sci_base_state_machine_change_state(
  331. &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
  332. break;
  333. }
  334. return SCI_SUCCESS;
  335. }
  336. /**
  337. *
  338. * @request: This parameter specifies the request for which a frame has been
  339. * received.
  340. * @frame_index: This parameter specifies the index of the frame that has been
  341. * received.
  342. *
  343. * This method processes frames received from the target while waiting for a
  344. * device to host register FIS. If a non-register FIS is received during this
  345. * time, it is treated as a protocol violation from an IO perspective. Indicate
  346. * if the received frame was processed successfully.
  347. */
  348. static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
  349. struct scic_sds_request *sci_req,
  350. u32 frame_index)
  351. {
  352. enum sci_status status;
  353. struct dev_to_host_fis *frame_header;
  354. u32 *frame_buffer;
  355. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  356. struct scic_sds_controller *scic = sci_req->owning_controller;
  357. status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
  358. frame_index,
  359. (void **)&frame_header);
  360. if (status != SCI_SUCCESS) {
  361. dev_err(scic_to_dev(sci_req->owning_controller),
  362. "%s: SCIC IO Request 0x%p could not get frame header "
  363. "for frame index %d, status %x\n",
  364. __func__, stp_req, frame_index, status);
  365. return status;
  366. }
  367. switch (frame_header->fis_type) {
  368. case FIS_REGD2H:
  369. scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
  370. frame_index,
  371. (void **)&frame_buffer);
  372. scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
  373. frame_header,
  374. frame_buffer);
  375. /* The command has completed with error */
  376. scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
  377. SCI_FAILURE_IO_RESPONSE_VALID);
  378. break;
  379. default:
  380. dev_warn(scic_to_dev(scic),
  381. "%s: IO Request:0x%p Frame Id:%d protocol "
  382. "violation occurred\n", __func__, stp_req,
  383. frame_index);
  384. scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
  385. SCI_FAILURE_PROTOCOL_VIOLATION);
  386. break;
  387. }
  388. sci_base_state_machine_change_state(&sci_req->state_machine,
  389. SCI_BASE_REQUEST_STATE_COMPLETED);
  390. /* Frame has been decoded return it to the controller */
  391. scic_sds_controller_release_frame(scic, frame_index);
  392. return status;
  393. }
/* --------------------------------------------------------------------------- */

/* Substate handler table for non-data STP requests: while awaiting the H2D
 * FIS transmission only abort/TC-completion events are valid; while awaiting
 * the D2H response only abort/frame events are valid. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.tc_completion_handler	= scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.abort_handler		= scic_sds_request_started_state_abort_handler,
		.frame_handler		= scic_sds_stp_request_non_data_await_d2h_frame_handler,
	}
};
/* Enter the await-H2D-completion substate: install the matching handlers
 * and make this request the remote device's working request. */
static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_stp_request_started_non_data_substate_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
	);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req
	);
}
/* Enter the await-D2H substate: install the handlers that accept the
 * device-to-host response frame. */
static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_stp_request_started_non_data_substate_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
	);
}
/* --------------------------------------------------------------------------- */

/* Substate table for the non-data request's started substate machine. */
static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
	},
};
/**
 * scic_sds_stp_non_data_request_construct() - construct an STP non-data
 * request: a raw H2D register FIS with no data transfer.
 * @sci_req: the request to construct.
 *
 * Return: SCI_SUCCESS (unconditionally).
 */
enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	/* Build the STP task context structure */
	scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_non_data_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
  448. #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
  449. /* transmit DATA_FIS from (current sgl + offset) for input
  450. * parameter length. current sgl and offset is alreay stored in the IO request
  451. */
  452. static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
  453. struct scic_sds_request *sci_req,
  454. u32 length)
  455. {
  456. struct scic_sds_controller *scic = sci_req->owning_controller;
  457. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  458. struct scu_task_context *task_context;
  459. struct scu_sgl_element *current_sgl;
  460. /* Recycle the TC and reconstruct it for sending out DATA FIS containing
  461. * for the data from current_sgl+offset for the input length
  462. */
  463. task_context = scic_sds_controller_get_task_context_buffer(scic,
  464. sci_req->io_tag);
  465. if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
  466. current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
  467. else
  468. current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
  469. /* update the TC */
  470. task_context->command_iu_upper = current_sgl->address_upper;
  471. task_context->command_iu_lower = current_sgl->address_lower;
  472. task_context->transfer_length_bytes = length;
  473. task_context->type.stp.fis_type = FIS_DATA;
  474. /* send the new TC out. */
  475. return scic_controller_continue_io(sci_req);
  476. }
  477. static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
  478. {
  479. struct scu_sgl_element *current_sgl;
  480. u32 sgl_offset;
  481. u32 remaining_bytes_in_current_sgl = 0;
  482. enum sci_status status = SCI_SUCCESS;
  483. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  484. sgl_offset = stp_req->type.pio.request_current.sgl_offset;
  485. if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
  486. current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
  487. remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
  488. } else {
  489. current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
  490. remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
  491. }
  492. if (stp_req->type.pio.pio_transfer_bytes > 0) {
  493. if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
  494. /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
  495. status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
  496. if (status == SCI_SUCCESS) {
  497. stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
  498. /* update the current sgl, sgl_offset and save for future */
  499. current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
  500. sgl_offset = 0;
  501. }
  502. } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
  503. /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
  504. scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
  505. if (status == SCI_SUCCESS) {
  506. /* Sgl offset will be adjusted and saved for future */
  507. sgl_offset += stp_req->type.pio.pio_transfer_bytes;
  508. current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
  509. stp_req->type.pio.pio_transfer_bytes = 0;
  510. }
  511. }
  512. }
  513. if (status == SCI_SUCCESS) {
  514. stp_req->type.pio.request_current.sgl_offset = sgl_offset;
  515. }
  516. return status;
  517. }
/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy received PIO
 * data into the IO request's SGL-described data region.
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the length specified to the IO request
 * SGL specified data region.
 *
 * Return: SCI_SUCCESS (unconditionally).
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		/* Walk the scatterlist, copying up to one entry's worth per
		 * iteration via a short-lived atomic kmap of each page.
		 * NOTE(review): copy_len is bounded by sg_dma_len() yet the
		 * copy targets the CPU mapping (sg_page/offset) — presumably
		 * dma_len == sg->length for these sgls; confirm. */
		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* No scatterlist: task->scatter is the flat destination
		 * buffer itself. */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
  561. /**
  562. *
  563. * @sci_req: The PIO DATA IN request that is to receive the data.
  564. * @data_buffer: The buffer to copy from.
  565. *
  566. * Copy the data buffer to the io request data region. enum sci_status
  567. */
  568. static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
  569. struct scic_sds_stp_request *sci_req,
  570. u8 *data_buffer)
  571. {
  572. enum sci_status status;
  573. /*
  574. * If there is less than 1K remaining in the transfer request
  575. * copy just the data for the transfer */
  576. if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
  577. status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
  578. sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
  579. if (status == SCI_SUCCESS)
  580. sci_req->type.pio.pio_transfer_bytes = 0;
  581. } else {
  582. /* We are transfering the whole frame so copy */
  583. status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
  584. sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
  585. if (status == SCI_SUCCESS)
  586. sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
  587. }
  588. return status;
  589. }
  590. /**
  591. *
  592. * @sci_req:
  593. * @completion_code:
  594. *
  595. * enum sci_status
  596. */
  597. static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
  598. struct scic_sds_request *sci_req,
  599. u32 completion_code)
  600. {
  601. enum sci_status status = SCI_SUCCESS;
  602. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  603. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  604. scic_sds_request_set_status(
  605. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  606. );
  607. sci_base_state_machine_change_state(
  608. &sci_req->started_substate_machine,
  609. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  610. );
  611. break;
  612. default:
  613. /*
  614. * All other completion status cause the IO to be complete. If a NAK
  615. * was received, then it is up to the user to retry the request. */
  616. scic_sds_request_set_status(
  617. sci_req,
  618. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  619. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  620. );
  621. sci_base_state_machine_change_state(
  622. &sci_req->state_machine,
  623. SCI_BASE_REQUEST_STATE_COMPLETED
  624. );
  625. break;
  626. }
  627. return status;
  628. }
  629. static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
  630. u32 frame_index)
  631. {
  632. struct scic_sds_controller *scic = sci_req->owning_controller;
  633. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  634. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  635. struct sas_task *task = isci_request_access_task(ireq);
  636. struct dev_to_host_fis *frame_header;
  637. enum sci_status status;
  638. u32 *frame_buffer;
  639. status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
  640. frame_index,
  641. (void **)&frame_header);
  642. if (status != SCI_SUCCESS) {
  643. dev_err(scic_to_dev(scic),
  644. "%s: SCIC IO Request 0x%p could not get frame header "
  645. "for frame index %d, status %x\n",
  646. __func__, stp_req, frame_index, status);
  647. return status;
  648. }
  649. switch (frame_header->fis_type) {
  650. case FIS_PIO_SETUP:
  651. /* Get from the frame buffer the PIO Setup Data */
  652. scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
  653. frame_index,
  654. (void **)&frame_buffer);
  655. /* Get the data from the PIO Setup The SCU Hardware returns
  656. * first word in the frame_header and the rest of the data is in
  657. * the frame buffer so we need to back up one dword
  658. */
  659. /* transfer_count: first 16bits in the 4th dword */
  660. stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
  661. /* ending_status: 4th byte in the 3rd dword */
  662. stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
  663. scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
  664. frame_header,
  665. frame_buffer);
  666. sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
  667. /* The next state is dependent on whether the
  668. * request was PIO Data-in or Data out
  669. */
  670. if (task->data_dir == DMA_FROM_DEVICE) {
  671. sci_base_state_machine_change_state(&sci_req->started_substate_machine,
  672. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
  673. } else if (task->data_dir == DMA_TO_DEVICE) {
  674. /* Transmit data */
  675. status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
  676. if (status != SCI_SUCCESS)
  677. break;
  678. sci_base_state_machine_change_state(&sci_req->started_substate_machine,
  679. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
  680. }
  681. break;
  682. case FIS_SETDEVBITS:
  683. sci_base_state_machine_change_state(&sci_req->started_substate_machine,
  684. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
  685. break;
  686. case FIS_REGD2H:
  687. if (frame_header->status & ATA_BUSY) {
  688. /* Now why is the drive sending a D2H Register FIS when
  689. * it is still busy? Do nothing since we are still in
  690. * the right state.
  691. */
  692. dev_dbg(scic_to_dev(scic),
  693. "%s: SCIC PIO Request 0x%p received "
  694. "D2H Register FIS with BSY status "
  695. "0x%x\n", __func__, stp_req,
  696. frame_header->status);
  697. break;
  698. }
  699. scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
  700. frame_index,
  701. (void **)&frame_buffer);
  702. scic_sds_controller_copy_sata_response(&sci_req->stp.req,
  703. frame_header,
  704. frame_buffer);
  705. scic_sds_request_set_status(sci_req,
  706. SCU_TASK_DONE_CHECK_RESPONSE,
  707. SCI_FAILURE_IO_RESPONSE_VALID);
  708. sci_base_state_machine_change_state(&sci_req->state_machine,
  709. SCI_BASE_REQUEST_STATE_COMPLETED);
  710. break;
  711. default:
  712. /* FIXME: what do we do here? */
  713. break;
  714. }
  715. /* Frame is decoded return it to the controller */
  716. scic_sds_controller_release_frame(scic, frame_index);
  717. return status;
  718. }
/**
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler() - process a
 *    frame received while waiting for PIO data-in.
 * @sci_req: the request that received the frame.
 * @frame_index: index of the received unsolicited frame.
 *
 * A DATA FIS is either copied into the request's data region or, when no
 * SGL copy was requested, its index is saved for the user to claim later.
 * When the transfer count reaches zero the request is completed or returns
 * to waiting for the next frame, depending on the PIO ending status.
 * Any other FIS type is treated as requiring a SCSI abort.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SGL copy requested: remember the frame index so
			 * the data can be claimed later and mark the transfer
			 * as consumed. The frame is deliberately NOT released
			 * in this path. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
								      frame_index,
								      (void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
									    (u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			/* Device is no longer busy: the ending status from the
			 * PIO Setup FIS is final, so complete the request. */
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
							    SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			/* Device still busy: wait for the next PIO Setup or
			 * D2H register FIS. */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
							    SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		/* A non-DATA FIS here is a protocol violation from an IO
		 * perspective; fail the request. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
  782. /**
  783. *
  784. * @sci_req:
  785. * @completion_code:
  786. *
  787. * enum sci_status
  788. */
  789. static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
  790. struct scic_sds_request *sci_req,
  791. u32 completion_code)
  792. {
  793. enum sci_status status = SCI_SUCCESS;
  794. bool all_frames_transferred = false;
  795. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  796. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  797. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  798. /* Transmit data */
  799. if (stp_req->type.pio.pio_transfer_bytes != 0) {
  800. status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
  801. if (status == SCI_SUCCESS) {
  802. if (stp_req->type.pio.pio_transfer_bytes == 0)
  803. all_frames_transferred = true;
  804. }
  805. } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
  806. /*
  807. * this will happen if the all data is written at the
  808. * first time after the pio setup fis is received
  809. */
  810. all_frames_transferred = true;
  811. }
  812. /* all data transferred. */
  813. if (all_frames_transferred) {
  814. /*
  815. * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
  816. * and wait for PIO_SETUP fis / or D2H REg fis. */
  817. sci_base_state_machine_change_state(
  818. &sci_req->started_substate_machine,
  819. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  820. );
  821. }
  822. break;
  823. default:
  824. /*
  825. * All other completion status cause the IO to be complete. If a NAK
  826. * was received, then it is up to the user to retry the request. */
  827. scic_sds_request_set_status(
  828. sci_req,
  829. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  830. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  831. );
  832. sci_base_state_machine_change_state(
  833. &sci_req->state_machine,
  834. SCI_BASE_REQUEST_STATE_COMPLETED
  835. );
  836. break;
  837. }
  838. return status;
  839. }
  840. /**
  841. *
  842. * @request: This is the request which is receiving the event.
  843. * @event_code: This is the event code that the request on which the request is
  844. * expected to take action.
  845. *
  846. * This method will handle any link layer events while waiting for the data
  847. * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
  848. */
  849. static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
  850. struct scic_sds_request *request,
  851. u32 event_code)
  852. {
  853. enum sci_status status;
  854. switch (scu_get_event_specifier(event_code)) {
  855. case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
  856. /*
  857. * We are waiting for data and the SCU has R_ERR the data frame.
  858. * Go back to waiting for the D2H Register FIS */
  859. sci_base_state_machine_change_state(
  860. &request->started_substate_machine,
  861. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  862. );
  863. status = SCI_SUCCESS;
  864. break;
  865. default:
  866. dev_err(scic_to_dev(request->owning_controller),
  867. "%s: SCIC PIO Request 0x%p received unexpected "
  868. "event 0x%08x\n",
  869. __func__, request, event_code);
  870. /* / @todo Should we fail the PIO request when we get an unexpected event? */
  871. status = SCI_FAILURE;
  872. break;
  873. }
  874. return status;
  875. }
/* --------------------------------------------------------------------------- */

/* Per-substate handler dispatch for a started PIO request. Every substate
 * is abortable; only the handlers a substate actually uses are populated. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
	/* Waiting for the H2D register FIS transmission to complete */
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
	},
	/* Waiting for a PIO Setup / Set Device Bits / D2H register FIS */
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
	},
	/* Waiting for inbound DATA FISes (also handles link-layer events) */
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
		.frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
	},
	/* Waiting for TC completions of transmitted data-out frames */
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
	}
};
  896. static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
  897. void *object)
  898. {
  899. struct scic_sds_request *sci_req = object;
  900. SET_STATE_HANDLER(
  901. sci_req,
  902. scic_sds_stp_request_started_pio_substate_handler_table,
  903. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
  904. );
  905. scic_sds_remote_device_set_working_request(
  906. sci_req->target_device, sci_req);
  907. }
  908. static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
  909. {
  910. struct scic_sds_request *sci_req = object;
  911. SET_STATE_HANDLER(
  912. sci_req,
  913. scic_sds_stp_request_started_pio_substate_handler_table,
  914. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  915. );
  916. }
  917. static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
  918. void *object)
  919. {
  920. struct scic_sds_request *sci_req = object;
  921. SET_STATE_HANDLER(
  922. sci_req,
  923. scic_sds_stp_request_started_pio_substate_handler_table,
  924. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
  925. );
  926. }
  927. static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
  928. void *object)
  929. {
  930. struct scic_sds_request *sci_req = object;
  931. SET_STATE_HANDLER(
  932. sci_req,
  933. scic_sds_stp_request_started_pio_substate_handler_table,
  934. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
  935. );
  936. }
/* --------------------------------------------------------------------------- */

/* Substate table for a started PIO request; each entry installs the matching
 * handler-table entry on entry to the state. */
static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	}
};
  952. enum sci_status
  953. scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
  954. bool copy_rx_frame)
  955. {
  956. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  957. struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
  958. scic_sds_stp_non_ncq_request_construct(sci_req);
  959. scu_stp_raw_request_construct_task_context(stp_req,
  960. sci_req->task_context_buffer);
  961. pio->current_transfer_bytes = 0;
  962. pio->ending_error = 0;
  963. pio->ending_status = 0;
  964. pio->request_current.sgl_offset = 0;
  965. pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;
  966. if (copy_rx_frame) {
  967. scic_sds_request_build_sgl(sci_req);
  968. /* Since the IO request copy of the TC contains the same data as
  969. * the actual TC this pointer is vaild for either.
  970. */
  971. pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
  972. } else {
  973. /* The user does not want the data copied to the SGL buffer location */
  974. pio->request_current.sgl_pair = NULL;
  975. }
  976. sci_base_state_machine_construct(&sci_req->started_substate_machine,
  977. sci_req,
  978. scic_sds_stp_request_started_pio_substate_table,
  979. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);
  980. return SCI_SUCCESS;
  981. }
  982. static void scic_sds_stp_request_udma_complete_request(
  983. struct scic_sds_request *request,
  984. u32 scu_status,
  985. enum sci_status sci_status)
  986. {
  987. scic_sds_request_set_status(request, scu_status, sci_status);
  988. sci_base_state_machine_change_state(&request->state_machine,
  989. SCI_BASE_REQUEST_STATE_COMPLETED);
  990. }
  991. static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
  992. u32 frame_index)
  993. {
  994. struct scic_sds_controller *scic = sci_req->owning_controller;
  995. struct dev_to_host_fis *frame_header;
  996. enum sci_status status;
  997. u32 *frame_buffer;
  998. status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
  999. frame_index,
  1000. (void **)&frame_header);
  1001. if ((status == SCI_SUCCESS) &&
  1002. (frame_header->fis_type == FIS_REGD2H)) {
  1003. scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
  1004. frame_index,
  1005. (void **)&frame_buffer);
  1006. scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
  1007. frame_header,
  1008. frame_buffer);
  1009. }
  1010. scic_sds_controller_release_frame(scic, frame_index);
  1011. return status;
  1012. }
/**
 * scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler() -
 *    process a TC completion for a UDMA request.
 * @sci_req: the request whose task context completed.
 * @completion_code: raw completion code from the SCU.
 *
 * GOOD completes the request successfully. UNEXP_FIS/REG_ERR either
 * completes with the already-received D2H response or waits for one. A set
 * of link-error statuses suspends the remote device and then (by falling
 * through) completes the IO with a controller-specific error.
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check ther response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			/* The D2H FIS already arrived: suspend the device and
			 * complete with the response marked valid. */
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
		/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
					SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
					SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
  1064. static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
  1065. struct scic_sds_request *sci_req,
  1066. u32 frame_index)
  1067. {
  1068. enum sci_status status;
  1069. /* Use the general frame handler to copy the resposne data */
  1070. status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
  1071. if (status != SCI_SUCCESS)
  1072. return status;
  1073. scic_sds_stp_request_udma_complete_request(sci_req,
  1074. SCU_TASK_DONE_CHECK_RESPONSE,
  1075. SCI_FAILURE_IO_RESPONSE_VALID);
  1076. return status;
  1077. }
/* --------------------------------------------------------------------------- */

/* Per-substate handler dispatch for a started UDMA request. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	/* Waiting for the task context completion (frames may arrive early) */
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler = scic_sds_stp_request_udma_general_frame_handler,
	},
	/* Waiting for the error D2H register FIS after a failed TC */
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	},
};
  1090. static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
  1091. void *object)
  1092. {
  1093. struct scic_sds_request *sci_req = object;
  1094. SET_STATE_HANDLER(
  1095. sci_req,
  1096. scic_sds_stp_request_started_udma_substate_handler_table,
  1097. SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
  1098. );
  1099. }
  1100. /**
  1101. *
  1102. *
  1103. * This state is entered when there is an TC completion failure. The hardware
  1104. * received an unexpected condition while processing the IO request and now
  1105. * will UF the D2H register FIS to complete the IO.
  1106. */
  1107. static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
  1108. void *object)
  1109. {
  1110. struct scic_sds_request *sci_req = object;
  1111. SET_STATE_HANDLER(
  1112. sci_req,
  1113. scic_sds_stp_request_started_udma_substate_handler_table,
  1114. SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
  1115. );
  1116. }
/* --------------------------------------------------------------------------- */

/* Substate table for a started UDMA request. */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
};
  1126. enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
  1127. u32 len,
  1128. enum dma_data_direction dir)
  1129. {
  1130. scic_sds_stp_non_ncq_request_construct(sci_req);
  1131. scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
  1132. len, dir);
  1133. sci_base_state_machine_construct(
  1134. &sci_req->started_substate_machine,
  1135. sci_req,
  1136. scic_sds_stp_request_started_udma_substate_table,
  1137. SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
  1138. );
  1139. return SCI_SUCCESS;
  1140. }
  1141. /**
  1142. *
  1143. * @sci_req:
  1144. * @completion_code:
  1145. *
  1146. * This method processes a TC completion. The expected TC completion is for
  1147. * the transmission of the H2D register FIS containing the SATA/STP non-data
  1148. * request. This method always successfully processes the TC completion.
  1149. * SCI_SUCCESS This value is always returned.
  1150. */
  1151. static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
  1152. struct scic_sds_request *sci_req,
  1153. u32 completion_code)
  1154. {
  1155. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  1156. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  1157. scic_sds_request_set_status(
  1158. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  1159. );
  1160. sci_base_state_machine_change_state(
  1161. &sci_req->started_substate_machine,
  1162. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
  1163. );
  1164. break;
  1165. default:
  1166. /*
  1167. * All other completion status cause the IO to be complete. If a NAK
  1168. * was received, then it is up to the user to retry the request. */
  1169. scic_sds_request_set_status(
  1170. sci_req,
  1171. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  1172. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  1173. );
  1174. sci_base_state_machine_change_state(
  1175. &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
  1176. break;
  1177. }
  1178. return SCI_SUCCESS;
  1179. }
  1180. /**
  1181. *
  1182. * @sci_req:
  1183. * @completion_code:
  1184. *
  1185. * This method processes a TC completion. The expected TC completion is for
  1186. * the transmission of the H2D register FIS containing the SATA/STP non-data
  1187. * request. This method always successfully processes the TC completion.
  1188. * SCI_SUCCESS This value is always returned.
  1189. */
  1190. static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
  1191. struct scic_sds_request *sci_req,
  1192. u32 completion_code)
  1193. {
  1194. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  1195. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  1196. scic_sds_request_set_status(
  1197. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  1198. );
  1199. sci_base_state_machine_change_state(
  1200. &sci_req->started_substate_machine,
  1201. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
  1202. );
  1203. break;
  1204. default:
  1205. /*
  1206. * All other completion status cause the IO to be complete. If a NAK
  1207. * was received, then it is up to the user to retry the request. */
  1208. scic_sds_request_set_status(
  1209. sci_req,
  1210. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  1211. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  1212. );
  1213. sci_base_state_machine_change_state(&sci_req->state_machine,
  1214. SCI_BASE_REQUEST_STATE_COMPLETED);
  1215. break;
  1216. }
  1217. return SCI_SUCCESS;
  1218. }
/**
 *
 * @request: This parameter specifies the request for which a frame has been
 *    received.
 * @frame_index: This parameter specifies the index of the frame that has been
 *    received.
 *
 * This method processes frames received from the target while waiting for a
 * device to host register FIS. If a non-register FIS is received during this
 * time, it is treated as a protocol violation from an IO perspective. Indicate
 * if the received frame was processed successfully.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);
	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		/* Expected response: capture it into the response buffer. */
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		/* Any other FIS type here is a protocol violation. */
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	/* Either way the request is done; complete it. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
/* --------------------------------------------------------------------------- */

/* Per-substate handler dispatch for a started soft-reset request. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	/* Waiting for the SRST-asserting H2D FIS transmission to complete */
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	/* Waiting for the SRST-deasserting H2D FIS transmission to complete */
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	/* Waiting for the device's D2H register FIS response */
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	},
};
  1293. static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
  1294. void *object)
  1295. {
  1296. struct scic_sds_request *sci_req = object;
  1297. SET_STATE_HANDLER(
  1298. sci_req,
  1299. scic_sds_stp_request_started_soft_reset_substate_handler_table,
  1300. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
  1301. );
  1302. scic_sds_remote_device_set_working_request(
  1303. sci_req->target_device, sci_req
  1304. );
  1305. }
  1306. static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
  1307. void *object)
  1308. {
  1309. struct scic_sds_request *sci_req = object;
  1310. struct scu_task_context *task_context;
  1311. struct host_to_dev_fis *h2d_fis;
  1312. enum sci_status status;
  1313. /* Clear the SRST bit */
  1314. h2d_fis = &sci_req->stp.cmd;
  1315. h2d_fis->control = 0;
  1316. /* Clear the TC control bit */
  1317. task_context = scic_sds_controller_get_task_context_buffer(
  1318. sci_req->owning_controller, sci_req->io_tag);
  1319. task_context->control_frame = 0;
  1320. status = scic_controller_continue_io(sci_req);
  1321. if (status == SCI_SUCCESS) {
  1322. SET_STATE_HANDLER(
  1323. sci_req,
  1324. scic_sds_stp_request_started_soft_reset_substate_handler_table,
  1325. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
  1326. );
  1327. }
  1328. }
  1329. static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
  1330. void *object)
  1331. {
  1332. struct scic_sds_request *sci_req = object;
  1333. SET_STATE_HANDLER(
  1334. sci_req,
  1335. scic_sds_stp_request_started_soft_reset_substate_handler_table,
  1336. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
  1337. );
  1338. }
/* Substate table for a started soft-reset request. */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
};
  1350. enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
  1351. {
  1352. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  1353. scic_sds_stp_non_ncq_request_construct(sci_req);
  1354. /* Build the STP task context structure */
  1355. scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
  1356. sci_base_state_machine_construct(&sci_req->started_substate_machine,
  1357. sci_req,
  1358. scic_sds_stp_request_started_soft_reset_substate_table,
  1359. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
  1360. return SCI_SUCCESS;
  1361. }