/* stp_request.c */
  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * BSD LICENSE
  25. *
  26. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27. * All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions
  31. * are met:
  32. *
  33. * * Redistributions of source code must retain the above copyright
  34. * notice, this list of conditions and the following disclaimer.
  35. * * Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions and the following disclaimer in
  37. * the documentation and/or other materials provided with the
  38. * distribution.
  39. * * Neither the name of Intel Corporation nor the names of its
  40. * contributors may be used to endorse or promote products derived
  41. * from this software without specific prior written permission.
  42. *
  43. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54. */
  55. #include <scsi/sas.h>
  56. #include "sas.h"
  57. #include "state_machine.h"
  58. #include "remote_device.h"
  59. #include "stp_request.h"
  60. #include "unsolicited_frame_control.h"
  61. #include "scu_completion_codes.h"
  62. #include "scu_event_codes.h"
  63. #include "scu_task_context.h"
  64. #include "request.h"
  65. void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req)
  66. {
  67. if (sci_req->was_tag_assigned_by_user == false)
  68. sci_req->task_context_buffer = &sci_req->tc;
  69. }
/**
 * scu_sata_reqeust_construct_task_context() - fill in the SCU Task Context
 * for any type of SATA request.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * Called from the various SATA constructors once the general IO request
 * construction and the command-buffer assignment are complete.  Revisit
 * task context construction to determine what is common for SSP/SMP/STP
 * task context structures.
 */
static void scu_sata_reqeust_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sci_req);
	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with the its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	/* Command IU length in DWORDs; the first DWORD of the H2D FIS is
	 * excluded because it travels in the TC body itself (see below). */
	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(
							controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transfered in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
  162. /**
  163. *
  164. * @sci_req:
  165. *
  166. * This method will perform any general sata request construction. What part of
  167. * SATA IO request construction is general? none
  168. */
  169. static void scic_sds_stp_non_ncq_request_construct(
  170. struct scic_sds_request *sci_req)
  171. {
  172. sci_req->has_started_substate_machine = true;
  173. }
  174. /**
  175. *
  176. * @sci_req: This parameter specifies the request to be constructed as an
  177. * optimized request.
  178. * @optimized_task_type: This parameter specifies whether the request is to be
  179. * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
  180. * value of 1 indicates NCQ.
  181. *
  182. * This method will perform request construction common to all types of STP
  183. * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
  184. * returns an indication as to whether the construction was successful.
  185. */
  186. static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
  187. u8 optimized_task_type,
  188. u32 len,
  189. enum dma_data_direction dir)
  190. {
  191. struct scu_task_context *task_context = sci_req->task_context_buffer;
  192. /* Build the STP task context structure */
  193. scu_sata_reqeust_construct_task_context(sci_req, task_context);
  194. /* Copy over the SGL elements */
  195. scic_sds_request_build_sgl(sci_req);
  196. /* Copy over the number of bytes to be transfered */
  197. task_context->transfer_length_bytes = len;
  198. if (dir == DMA_TO_DEVICE) {
  199. /*
  200. * The difference between the DMA IN and DMA OUT request task type
  201. * values are consistent with the difference between FPDMA READ
  202. * and FPDMA WRITE values. Add the supplied task type parameter
  203. * to this difference to set the task type properly for this
  204. * DATA OUT (WRITE) case. */
  205. task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
  206. - SCU_TASK_TYPE_DMA_IN);
  207. } else {
  208. /*
  209. * For the DATA IN (READ) case, simply save the supplied
  210. * optimized task type. */
  211. task_context->task_type = optimized_task_type;
  212. }
  213. }
  214. /**
  215. *
  216. * @sci_req: This parameter specifies the request to be constructed.
  217. *
  218. * This method will construct the STP UDMA request and its associated TC data.
  219. * This method returns an indication as to whether the construction was
  220. * successful. SCI_SUCCESS Currently this method always returns this value.
  221. */
  222. enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
  223. u32 len,
  224. enum dma_data_direction dir)
  225. {
  226. scic_sds_stp_optimized_request_construct(sci_req,
  227. SCU_TASK_TYPE_FPDMAQ_READ,
  228. len, dir);
  229. return SCI_SUCCESS;
  230. }
  231. /**
  232. * scu_stp_raw_request_construct_task_context -
  233. * @sci_req: This parameter specifies the STP request object for which to
  234. * construct a RAW command frame task context.
  235. * @task_context: This parameter specifies the SCU specific task context buffer
  236. * to construct.
  237. *
  238. * This method performs the operations common to all SATA/STP requests
  239. * utilizing the raw frame method. none
  240. */
  241. static void scu_stp_raw_request_construct_task_context(
  242. struct scic_sds_stp_request *stp_req,
  243. struct scu_task_context *task_context)
  244. {
  245. struct scic_sds_request *sci_req = to_sci_req(stp_req);
  246. scu_sata_reqeust_construct_task_context(sci_req, task_context);
  247. task_context->control_frame = 0;
  248. task_context->priority = SCU_TASK_PRIORITY_NORMAL;
  249. task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
  250. task_context->type.stp.fis_type = FIS_REGH2D;
  251. task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
  252. }
  253. void scic_stp_io_request_set_ncq_tag(
  254. struct scic_sds_request *req,
  255. u16 ncq_tag)
  256. {
  257. /**
  258. * @note This could be made to return an error to the user if the user
  259. * attempts to set the NCQ tag in the wrong state.
  260. */
  261. req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
  262. }
  263. /**
  264. *
  265. * @sci_req:
  266. *
  267. * Get the next SGL element from the request. - Check on which SGL element pair
  268. * we are working - if working on SLG pair element A - advance to element B -
  269. * else - check to see if there are more SGL element pairs for this IO request
  270. * - if there are more SGL element pairs - advance to the next pair and return
  271. * element A struct scu_sgl_element*
  272. */
  273. static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
  274. {
  275. struct scu_sgl_element *current_sgl;
  276. struct scic_sds_request *sci_req = to_sci_req(stp_req);
  277. struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
  278. if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
  279. if (pio_sgl->sgl_pair->B.address_lower == 0 &&
  280. pio_sgl->sgl_pair->B.address_upper == 0) {
  281. current_sgl = NULL;
  282. } else {
  283. pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
  284. current_sgl = &pio_sgl->sgl_pair->B;
  285. }
  286. } else {
  287. if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
  288. pio_sgl->sgl_pair->next_pair_upper == 0) {
  289. current_sgl = NULL;
  290. } else {
  291. u64 phys_addr;
  292. phys_addr = pio_sgl->sgl_pair->next_pair_upper;
  293. phys_addr <<= 32;
  294. phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
  295. pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
  296. pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
  297. current_sgl = &pio_sgl->sgl_pair->A;
  298. }
  299. }
  300. return current_sgl;
  301. }
  302. /**
  303. *
  304. * @sci_req:
  305. * @completion_code:
  306. *
  307. * This method processes a TC completion. The expected TC completion is for
  308. * the transmission of the H2D register FIS containing the SATA/STP non-data
  309. * request. This method always successfully processes the TC completion.
  310. * SCI_SUCCESS This value is always returned.
  311. */
  312. static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
  313. struct scic_sds_request *sci_req,
  314. u32 completion_code)
  315. {
  316. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  317. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  318. scic_sds_request_set_status(
  319. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  320. );
  321. sci_base_state_machine_change_state(
  322. &sci_req->started_substate_machine,
  323. SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
  324. );
  325. break;
  326. default:
  327. /*
  328. * All other completion status cause the IO to be complete. If a NAK
  329. * was received, then it is up to the user to retry the request. */
  330. scic_sds_request_set_status(
  331. sci_req,
  332. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  333. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  334. );
  335. sci_base_state_machine_change_state(
  336. &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
  337. break;
  338. }
  339. return SCI_SUCCESS;
  340. }
/**
 * scic_sds_stp_request_non_data_await_d2h_frame_handler() - process frames
 * received while waiting for a device-to-host register FIS.
 * @sci_req: This parameter specifies the request for which a frame has been
 *    received.
 * @frame_index: This parameter specifies the index of the frame that has
 *    been received.
 *
 * A FIS_REGD2H frame is copied into the request's response buffer and the
 * IO is completed with SCI_FAILURE_IO_RESPONSE_VALID; any other FIS type
 * received during this time is treated as a protocol violation.  In both
 * cases the request transitions to the COMPLETED state and the frame is
 * released back to the controller.  Indicate if the received frame was
 * processed successfully.
 */
static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);
	if (status != SCI_SUCCESS) {
		/* The frame header is unreadable; report and bail out
		 * without releasing the frame. */
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);

		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
/* --------------------------------------------------------------------------- */

/*
 * Per-substate handlers for a started non-data STP request.  While awaiting
 * the H2D FIS transmission, only TC completions (and aborts) are handled;
 * while awaiting the D2H response, only unsolicited frames (and aborts).
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
	}
};
  410. static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
  411. void *object)
  412. {
  413. struct scic_sds_request *sci_req = object;
  414. SET_STATE_HANDLER(
  415. sci_req,
  416. scic_sds_stp_request_started_non_data_substate_handler_table,
  417. SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
  418. );
  419. scic_sds_remote_device_set_working_request(
  420. sci_req->target_device, sci_req
  421. );
  422. }
  423. static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
  424. {
  425. struct scic_sds_request *sci_req = object;
  426. SET_STATE_HANDLER(
  427. sci_req,
  428. scic_sds_stp_request_started_non_data_substate_handler_table,
  429. SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
  430. );
  431. }
/* --------------------------------------------------------------------------- */

/*
 * Substate table for a started non-data STP request; each entry's
 * enter_state routine installs the handler set for that substate.
 */
static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
	},
};
  441. enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
  442. {
  443. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  444. scic_sds_stp_non_ncq_request_construct(sci_req);
  445. /* Build the STP task context structure */
  446. scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
  447. sci_base_state_machine_construct(&sci_req->started_substate_machine,
  448. sci_req,
  449. scic_sds_stp_request_started_non_data_substate_table,
  450. SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
  451. return SCI_SUCCESS;
  452. }
  453. #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
  454. /* transmit DATA_FIS from (current sgl + offset) for input
  455. * parameter length. current sgl and offset is alreay stored in the IO request
  456. */
  457. static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
  458. struct scic_sds_request *sci_req,
  459. u32 length)
  460. {
  461. struct scic_sds_controller *scic = sci_req->owning_controller;
  462. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  463. struct scu_task_context *task_context;
  464. struct scu_sgl_element *current_sgl;
  465. /* Recycle the TC and reconstruct it for sending out DATA FIS containing
  466. * for the data from current_sgl+offset for the input length
  467. */
  468. task_context = scic_sds_controller_get_task_context_buffer(scic,
  469. sci_req->io_tag);
  470. if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
  471. current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
  472. else
  473. current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
  474. /* update the TC */
  475. task_context->command_iu_upper = current_sgl->address_upper;
  476. task_context->command_iu_lower = current_sgl->address_lower;
  477. task_context->transfer_length_bytes = length;
  478. task_context->type.stp.fis_type = FIS_DATA;
  479. /* send the new TC out. */
  480. return scic_controller_continue_io(sci_req);
  481. }
  482. static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
  483. {
  484. struct scu_sgl_element *current_sgl;
  485. u32 sgl_offset;
  486. u32 remaining_bytes_in_current_sgl = 0;
  487. enum sci_status status = SCI_SUCCESS;
  488. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  489. sgl_offset = stp_req->type.pio.request_current.sgl_offset;
  490. if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
  491. current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
  492. remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
  493. } else {
  494. current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
  495. remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
  496. }
  497. if (stp_req->type.pio.pio_transfer_bytes > 0) {
  498. if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
  499. /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
  500. status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
  501. if (status == SCI_SUCCESS) {
  502. stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
  503. /* update the current sgl, sgl_offset and save for future */
  504. current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
  505. sgl_offset = 0;
  506. }
  507. } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
  508. /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
  509. scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
  510. if (status == SCI_SUCCESS) {
  511. /* Sgl offset will be adjusted and saved for future */
  512. sgl_offset += stp_req->type.pio.pio_transfer_bytes;
  513. current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
  514. stp_req->type.pio.pio_transfer_bytes = 0;
  515. }
  516. }
  517. }
  518. if (status == SCI_SUCCESS) {
  519. stp_req->type.pio.request_current.sgl_offset = sgl_offset;
  520. }
  521. return status;
  522. }
/**
 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy the data from
 * the buffer for the length specified to the IO request SGL specified data
 * region.
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * When the task has scatter entries, each element's page is mapped with
 * kmap_atomic() and filled from @data_buf, up to sg_dma_len() bytes per
 * element.  When task->num_scatter is 0, task->scatter is used directly as
 * a flat destination buffer.
 *
 * Return: always SCI_SUCCESS.
 */
static enum sci_status
scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
						  u8 *data_buf, u32 len)
{
	struct scic_sds_request *sci_req;
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	sci_req = to_sci_req(stp_req);
	ireq = sci_req_to_ireq(sci_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			/* NOTE(review): assumes sg->offset + copy_len stays
			 * within the mapped page — confirm for multi-page
			 * scatter elements. */
			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		/* No scatterlist: task->scatter is a plain buffer pointer. */
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
  566. /**
  567. *
  568. * @sci_req: The PIO DATA IN request that is to receive the data.
  569. * @data_buffer: The buffer to copy from.
  570. *
  571. * Copy the data buffer to the io request data region. enum sci_status
  572. */
  573. static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
  574. struct scic_sds_stp_request *sci_req,
  575. u8 *data_buffer)
  576. {
  577. enum sci_status status;
  578. /*
  579. * If there is less than 1K remaining in the transfer request
  580. * copy just the data for the transfer */
  581. if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
  582. status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
  583. sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
  584. if (status == SCI_SUCCESS)
  585. sci_req->type.pio.pio_transfer_bytes = 0;
  586. } else {
  587. /* We are transfering the whole frame so copy */
  588. status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
  589. sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
  590. if (status == SCI_SUCCESS)
  591. sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
  592. }
  593. return status;
  594. }
  595. /**
  596. *
  597. * @sci_req:
  598. * @completion_code:
  599. *
  600. * enum sci_status
  601. */
  602. static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
  603. struct scic_sds_request *sci_req,
  604. u32 completion_code)
  605. {
  606. enum sci_status status = SCI_SUCCESS;
  607. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  608. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  609. scic_sds_request_set_status(
  610. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  611. );
  612. sci_base_state_machine_change_state(
  613. &sci_req->started_substate_machine,
  614. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  615. );
  616. break;
  617. default:
  618. /*
  619. * All other completion status cause the IO to be complete. If a NAK
  620. * was received, then it is up to the user to retry the request. */
  621. scic_sds_request_set_status(
  622. sci_req,
  623. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  624. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  625. );
  626. sci_base_state_machine_change_state(
  627. &sci_req->state_machine,
  628. SCI_BASE_REQUEST_STATE_COMPLETED
  629. );
  630. break;
  631. }
  632. return status;
  633. }
  634. static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
  635. u32 frame_index)
  636. {
  637. struct scic_sds_controller *scic = sci_req->owning_controller;
  638. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  639. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  640. struct sas_task *task = isci_request_access_task(ireq);
  641. struct dev_to_host_fis *frame_header;
  642. enum sci_status status;
  643. u32 *frame_buffer;
  644. status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
  645. frame_index,
  646. (void **)&frame_header);
  647. if (status != SCI_SUCCESS) {
  648. dev_err(scic_to_dev(scic),
  649. "%s: SCIC IO Request 0x%p could not get frame header "
  650. "for frame index %d, status %x\n",
  651. __func__, stp_req, frame_index, status);
  652. return status;
  653. }
  654. switch (frame_header->fis_type) {
  655. case FIS_PIO_SETUP:
  656. /* Get from the frame buffer the PIO Setup Data */
  657. scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
  658. frame_index,
  659. (void **)&frame_buffer);
  660. /* Get the data from the PIO Setup The SCU Hardware returns
  661. * first word in the frame_header and the rest of the data is in
  662. * the frame buffer so we need to back up one dword
  663. */
  664. /* transfer_count: first 16bits in the 4th dword */
  665. stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
  666. /* ending_status: 4th byte in the 3rd dword */
  667. stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
  668. scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
  669. frame_header,
  670. frame_buffer);
  671. sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
  672. /* The next state is dependent on whether the
  673. * request was PIO Data-in or Data out
  674. */
  675. if (task->data_dir == DMA_FROM_DEVICE) {
  676. sci_base_state_machine_change_state(&sci_req->started_substate_machine,
  677. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
  678. } else if (task->data_dir == DMA_TO_DEVICE) {
  679. /* Transmit data */
  680. status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
  681. if (status != SCI_SUCCESS)
  682. break;
  683. sci_base_state_machine_change_state(&sci_req->started_substate_machine,
  684. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
  685. }
  686. break;
  687. case FIS_SETDEVBITS:
  688. sci_base_state_machine_change_state(&sci_req->started_substate_machine,
  689. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
  690. break;
  691. case FIS_REGD2H:
  692. if (frame_header->status & ATA_BUSY) {
  693. /* Now why is the drive sending a D2H Register FIS when
  694. * it is still busy? Do nothing since we are still in
  695. * the right state.
  696. */
  697. dev_dbg(scic_to_dev(scic),
  698. "%s: SCIC PIO Request 0x%p received "
  699. "D2H Register FIS with BSY status "
  700. "0x%x\n", __func__, stp_req,
  701. frame_header->status);
  702. break;
  703. }
  704. scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
  705. frame_index,
  706. (void **)&frame_buffer);
  707. scic_sds_controller_copy_sata_response(&sci_req->stp.req,
  708. frame_header,
  709. frame_buffer);
  710. scic_sds_request_set_status(sci_req,
  711. SCU_TASK_DONE_CHECK_RESPONSE,
  712. SCI_FAILURE_IO_RESPONSE_VALID);
  713. sci_base_state_machine_change_state(&sci_req->state_machine,
  714. SCI_BASE_REQUEST_STATE_COMPLETED);
  715. break;
  716. default:
  717. /* FIXME: what do we do here? */
  718. break;
  719. }
  720. /* Frame is decoded return it to the controller */
  721. scic_sds_controller_release_frame(scic, frame_index);
  722. return status;
  723. }
/**
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler() - process an
 * unsolicited frame received while waiting for PIO data.
 * @sci_req: the PIO data-in request for which a frame arrived.
 * @frame_index: index of the received unsolicited frame.
 *
 * For a DATA FIS, the payload is either copied into the request's buffers
 * or, when no SGL copy was configured, the frame index is saved for the
 * upper layer and the frame is deliberately NOT released here.  When the
 * transfer is exhausted the request either completes (ending status no
 * longer BSY) or returns to waiting for the next status frame.  A non-DATA
 * FIS terminates the request as requiring a SCSI abort.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
		frame_index,
		(void **)&frame_header);

	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	if (frame_header->fis_type == FIS_DATA) {
		if (stp_req->type.pio.request_current.sgl_pair == NULL) {
			/* No SGL copy was requested: hand the raw frame to
			 * the upper layer by recording its index.  The frame
			 * is intentionally kept (not released) in this path. */
			sci_req->saved_rx_frame_index = frame_index;
			stp_req->type.pio.pio_transfer_bytes = 0;
		} else {
			scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
				frame_index,
				(void **)&frame_buffer);

			status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
				(u8 *)frame_buffer);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(scic, frame_index);
		}

		/* Check for the end of the transfer, are there more
		 * bytes remaining for this data transfer
		 */
		if (status != SCI_SUCCESS ||
		    stp_req->type.pio.pio_transfer_bytes != 0)
			return status;

		if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
			/* Device is done: complete with the response from the
			 * earlier PIO setup FIS. */
			scic_sds_request_set_status(sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);

			sci_base_state_machine_change_state(&sci_req->state_machine,
				SCI_BASE_REQUEST_STATE_COMPLETED);
		} else {
			/* More PIO phases follow: wait for the next PIO setup
			 * or D2H register FIS. */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
		}
	} else {
		dev_err(scic_to_dev(scic),
			"%s: SCIC PIO Request 0x%p received frame %d "
			"with fis type 0x%02x when expecting a data "
			"fis.\n", __func__, stp_req, frame_index,
			frame_header->fis_type);

		scic_sds_request_set_status(sci_req,
			SCU_TASK_DONE_GOOD,
			SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

		sci_base_state_machine_change_state(&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED);

		/* Frame is decoded return it to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
	}

	return status;
}
  787. /**
  788. *
  789. * @sci_req:
  790. * @completion_code:
  791. *
  792. * enum sci_status
  793. */
  794. static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
  795. struct scic_sds_request *sci_req,
  796. u32 completion_code)
  797. {
  798. enum sci_status status = SCI_SUCCESS;
  799. bool all_frames_transferred = false;
  800. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  801. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  802. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  803. /* Transmit data */
  804. if (stp_req->type.pio.pio_transfer_bytes != 0) {
  805. status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
  806. if (status == SCI_SUCCESS) {
  807. if (stp_req->type.pio.pio_transfer_bytes == 0)
  808. all_frames_transferred = true;
  809. }
  810. } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
  811. /*
  812. * this will happen if the all data is written at the
  813. * first time after the pio setup fis is received
  814. */
  815. all_frames_transferred = true;
  816. }
  817. /* all data transferred. */
  818. if (all_frames_transferred) {
  819. /*
  820. * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
  821. * and wait for PIO_SETUP fis / or D2H REg fis. */
  822. sci_base_state_machine_change_state(
  823. &sci_req->started_substate_machine,
  824. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  825. );
  826. }
  827. break;
  828. default:
  829. /*
  830. * All other completion status cause the IO to be complete. If a NAK
  831. * was received, then it is up to the user to retry the request. */
  832. scic_sds_request_set_status(
  833. sci_req,
  834. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  835. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  836. );
  837. sci_base_state_machine_change_state(
  838. &sci_req->state_machine,
  839. SCI_BASE_REQUEST_STATE_COMPLETED
  840. );
  841. break;
  842. }
  843. return status;
  844. }
  845. /**
  846. *
  847. * @request: This is the request which is receiving the event.
  848. * @event_code: This is the event code that the request on which the request is
  849. * expected to take action.
  850. *
  851. * This method will handle any link layer events while waiting for the data
  852. * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
  853. */
  854. static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
  855. struct scic_sds_request *request,
  856. u32 event_code)
  857. {
  858. enum sci_status status;
  859. switch (scu_get_event_specifier(event_code)) {
  860. case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
  861. /*
  862. * We are waiting for data and the SCU has R_ERR the data frame.
  863. * Go back to waiting for the D2H Register FIS */
  864. sci_base_state_machine_change_state(
  865. &request->started_substate_machine,
  866. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  867. );
  868. status = SCI_SUCCESS;
  869. break;
  870. default:
  871. dev_err(scic_to_dev(request->owning_controller),
  872. "%s: SCIC PIO Request 0x%p received unexpected "
  873. "event 0x%08x\n",
  874. __func__, request, event_code);
  875. /* / @todo Should we fail the PIO request when we get an unexpected event? */
  876. status = SCI_FAILURE;
  877. break;
  878. }
  879. return status;
  880. }
  881. /* --------------------------------------------------------------------------- */
/* Dispatch table for the PIO started substates: maps each substate to its
 * abort, task context completion, event, and unsolicited frame handlers. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
		.frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
	}
};
  901. static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
  902. void *object)
  903. {
  904. struct scic_sds_request *sci_req = object;
  905. SET_STATE_HANDLER(
  906. sci_req,
  907. scic_sds_stp_request_started_pio_substate_handler_table,
  908. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
  909. );
  910. scic_sds_remote_device_set_working_request(
  911. sci_req->target_device, sci_req);
  912. }
  913. static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
  914. {
  915. struct scic_sds_request *sci_req = object;
  916. SET_STATE_HANDLER(
  917. sci_req,
  918. scic_sds_stp_request_started_pio_substate_handler_table,
  919. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
  920. );
  921. }
  922. static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
  923. void *object)
  924. {
  925. struct scic_sds_request *sci_req = object;
  926. SET_STATE_HANDLER(
  927. sci_req,
  928. scic_sds_stp_request_started_pio_substate_handler_table,
  929. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
  930. );
  931. }
  932. static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
  933. void *object)
  934. {
  935. struct scic_sds_request *sci_req = object;
  936. SET_STATE_HANDLER(
  937. sci_req,
  938. scic_sds_stp_request_started_pio_substate_handler_table,
  939. SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
  940. );
  941. }
  942. /* --------------------------------------------------------------------------- */
/* Enter-state callbacks for each PIO started substate. */
static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	}
};
  957. enum sci_status
  958. scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
  959. bool copy_rx_frame)
  960. {
  961. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  962. struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;
  963. scic_sds_stp_non_ncq_request_construct(sci_req);
  964. scu_stp_raw_request_construct_task_context(stp_req,
  965. sci_req->task_context_buffer);
  966. pio->current_transfer_bytes = 0;
  967. pio->ending_error = 0;
  968. pio->ending_status = 0;
  969. pio->request_current.sgl_offset = 0;
  970. pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;
  971. if (copy_rx_frame) {
  972. scic_sds_request_build_sgl(sci_req);
  973. /* Since the IO request copy of the TC contains the same data as
  974. * the actual TC this pointer is vaild for either.
  975. */
  976. pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
  977. } else {
  978. /* The user does not want the data copied to the SGL buffer location */
  979. pio->request_current.sgl_pair = NULL;
  980. }
  981. sci_base_state_machine_construct(&sci_req->started_substate_machine,
  982. sci_req,
  983. scic_sds_stp_request_started_pio_substate_table,
  984. SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);
  985. return SCI_SUCCESS;
  986. }
  987. static void scic_sds_stp_request_udma_complete_request(
  988. struct scic_sds_request *request,
  989. u32 scu_status,
  990. enum sci_status sci_status)
  991. {
  992. scic_sds_request_set_status(request, scu_status, sci_status);
  993. sci_base_state_machine_change_state(&request->state_machine,
  994. SCI_BASE_REQUEST_STATE_COMPLETED);
  995. }
  996. static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
  997. u32 frame_index)
  998. {
  999. struct scic_sds_controller *scic = sci_req->owning_controller;
  1000. struct dev_to_host_fis *frame_header;
  1001. enum sci_status status;
  1002. u32 *frame_buffer;
  1003. status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
  1004. frame_index,
  1005. (void **)&frame_header);
  1006. if ((status == SCI_SUCCESS) &&
  1007. (frame_header->fis_type == FIS_REGD2H)) {
  1008. scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
  1009. frame_index,
  1010. (void **)&frame_buffer);
  1011. scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
  1012. frame_header,
  1013. frame_buffer);
  1014. }
  1015. scic_sds_controller_release_frame(scic, frame_index);
  1016. return status;
  1017. }
/*
 * Process a task context completion for a UDMA request.  A good completion
 * finishes the request successfully.  UNEXP_FIS/REG_ERR completions either
 * finish using an already-received D2H response or move to the substate
 * that waits for the D2H register FIS.  The listed link-layer errors
 * additionally suspend the remote device before falling through to the
 * generic error completion.
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
		/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
  1069. static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
  1070. struct scic_sds_request *sci_req,
  1071. u32 frame_index)
  1072. {
  1073. enum sci_status status;
  1074. /* Use the general frame handler to copy the resposne data */
  1075. status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
  1076. if (status != SCI_SUCCESS)
  1077. return status;
  1078. scic_sds_stp_request_udma_complete_request(sci_req,
  1079. SCU_TASK_DONE_CHECK_RESPONSE,
  1080. SCI_FAILURE_IO_RESPONSE_VALID);
  1081. return status;
  1082. }
  1083. /* --------------------------------------------------------------------------- */
/* Dispatch table for the UDMA started substates. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler = scic_sds_stp_request_udma_general_frame_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	},
};
  1095. static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
  1096. void *object)
  1097. {
  1098. struct scic_sds_request *sci_req = object;
  1099. SET_STATE_HANDLER(
  1100. sci_req,
  1101. scic_sds_stp_request_started_udma_substate_handler_table,
  1102. SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
  1103. );
  1104. }
  1105. /**
  1106. *
  1107. *
  1108. * This state is entered when there is an TC completion failure. The hardware
  1109. * received an unexpected condition while processing the IO request and now
  1110. * will UF the D2H register FIS to complete the IO.
  1111. */
  1112. static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
  1113. void *object)
  1114. {
  1115. struct scic_sds_request *sci_req = object;
  1116. SET_STATE_HANDLER(
  1117. sci_req,
  1118. scic_sds_stp_request_started_udma_substate_handler_table,
  1119. SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
  1120. );
  1121. }
  1122. /* --------------------------------------------------------------------------- */
/* Enter-state callbacks for each UDMA started substate. */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
};
  1131. enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
  1132. u32 len,
  1133. enum dma_data_direction dir)
  1134. {
  1135. scic_sds_stp_non_ncq_request_construct(sci_req);
  1136. scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
  1137. len, dir);
  1138. sci_base_state_machine_construct(
  1139. &sci_req->started_substate_machine,
  1140. sci_req,
  1141. scic_sds_stp_request_started_udma_substate_table,
  1142. SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
  1143. );
  1144. return SCI_SUCCESS;
  1145. }
  1146. /**
  1147. *
  1148. * @sci_req:
  1149. * @completion_code:
  1150. *
  1151. * This method processes a TC completion. The expected TC completion is for
  1152. * the transmission of the H2D register FIS containing the SATA/STP non-data
  1153. * request. This method always successfully processes the TC completion.
  1154. * SCI_SUCCESS This value is always returned.
  1155. */
  1156. static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
  1157. struct scic_sds_request *sci_req,
  1158. u32 completion_code)
  1159. {
  1160. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  1161. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  1162. scic_sds_request_set_status(
  1163. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  1164. );
  1165. sci_base_state_machine_change_state(
  1166. &sci_req->started_substate_machine,
  1167. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
  1168. );
  1169. break;
  1170. default:
  1171. /*
  1172. * All other completion status cause the IO to be complete. If a NAK
  1173. * was received, then it is up to the user to retry the request. */
  1174. scic_sds_request_set_status(
  1175. sci_req,
  1176. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  1177. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  1178. );
  1179. sci_base_state_machine_change_state(
  1180. &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
  1181. break;
  1182. }
  1183. return SCI_SUCCESS;
  1184. }
  1185. /**
  1186. *
  1187. * @sci_req:
  1188. * @completion_code:
  1189. *
  1190. * This method processes a TC completion. The expected TC completion is for
  1191. * the transmission of the H2D register FIS containing the SATA/STP non-data
  1192. * request. This method always successfully processes the TC completion.
  1193. * SCI_SUCCESS This value is always returned.
  1194. */
  1195. static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
  1196. struct scic_sds_request *sci_req,
  1197. u32 completion_code)
  1198. {
  1199. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  1200. case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
  1201. scic_sds_request_set_status(
  1202. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
  1203. );
  1204. sci_base_state_machine_change_state(
  1205. &sci_req->started_substate_machine,
  1206. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
  1207. );
  1208. break;
  1209. default:
  1210. /*
  1211. * All other completion status cause the IO to be complete. If a NAK
  1212. * was received, then it is up to the user to retry the request. */
  1213. scic_sds_request_set_status(
  1214. sci_req,
  1215. SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
  1216. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
  1217. );
  1218. sci_base_state_machine_change_state(&sci_req->state_machine,
  1219. SCI_BASE_REQUEST_STATE_COMPLETED);
  1220. break;
  1221. }
  1222. return SCI_SUCCESS;
  1223. }
/**
 * scic_sds_stp_request_soft_reset_await_d2h_frame_handler() - process frames
 * received while waiting for the soft reset D2H register FIS.
 * @sci_req: the request for which a frame has been received.
 * @frame_index: the index of the frame that has been received.
 *
 * A D2H register FIS is copied into the response area and completes the
 * request with a check-response status; any other FIS type is treated as a
 * protocol violation.  Either way the request moves to the completed state
 * and the frame is released.  Returns the header-fetch status.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
		frame_index,
		(void **)&frame_header);
	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
			frame_index,
			(void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
			frame_header,
			frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req,
			SCU_TASK_DONE_CHECK_RESPONSE,
			SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	default:
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
			SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
		SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
  1283. /* --------------------------------------------------------------------------- */
/* Dispatch table for the soft reset started substates. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	},
};
  1298. static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
  1299. void *object)
  1300. {
  1301. struct scic_sds_request *sci_req = object;
  1302. SET_STATE_HANDLER(
  1303. sci_req,
  1304. scic_sds_stp_request_started_soft_reset_substate_handler_table,
  1305. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
  1306. );
  1307. scic_sds_remote_device_set_working_request(
  1308. sci_req->target_device, sci_req
  1309. );
  1310. }
/* Enter the SRST-deasserted (diagnostic) phase: clear the SRST control bit
 * in the H2D FIS and the control-frame bit in the task context, then resume
 * the IO so the hardware transmits the second (deasserting) register FIS. */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		/* Only switch handler tables once the IO has actually been
		 * resumed.  NOTE(review): on failure the previous substate's
		 * handlers remain installed -- confirm that is intended. */
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_stp_request_started_soft_reset_substate_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
	}
}
  1334. static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
  1335. void *object)
  1336. {
  1337. struct scic_sds_request *sci_req = object;
  1338. SET_STATE_HANDLER(
  1339. sci_req,
  1340. scic_sds_stp_request_started_soft_reset_substate_handler_table,
  1341. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
  1342. );
  1343. }
/* Enter-state callbacks for each soft reset started substate. */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
};
  1355. enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
  1356. {
  1357. struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
  1358. scic_sds_stp_non_ncq_request_construct(sci_req);
  1359. /* Build the STP task context structure */
  1360. scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
  1361. sci_base_state_machine_construct(&sci_req->started_substate_machine,
  1362. sci_req,
  1363. scic_sds_stp_request_started_soft_reset_substate_table,
  1364. SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
  1365. return SCI_SUCCESS;
  1366. }