request.c 77 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545
  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * BSD LICENSE
  25. *
  26. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27. * All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions
  31. * are met:
  32. *
  33. * * Redistributions of source code must retain the above copyright
  34. * notice, this list of conditions and the following disclaimer.
  35. * * Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions and the following disclaimer in
  37. * the documentation and/or other materials provided with the
  38. * distribution.
  39. * * Neither the name of Intel Corporation nor the names of its
  40. * contributors may be used to endorse or promote products derived
  41. * from this software without specific prior written permission.
  42. *
  43. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54. */
  55. #include "isci.h"
  56. #include "task.h"
  57. #include "request.h"
  58. #include "sata.h"
  59. #include "scu_completion_codes.h"
  60. #include "sas.h"
  61. /**
  62. * This method returns the sgl element pair for the specificed sgl_pair index.
  63. * @sci_req: This parameter specifies the IO request for which to retrieve
  64. * the Scatter-Gather List element pair.
  65. * @sgl_pair_index: This parameter specifies the index into the SGL element
  66. * pair to be retrieved.
  67. *
  68. * This method returns a pointer to an struct scu_sgl_element_pair.
  69. */
  70. static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
  71. struct scic_sds_request *sci_req,
  72. u32 sgl_pair_index
  73. ) {
  74. struct scu_task_context *task_context;
  75. task_context = (struct scu_task_context *)sci_req->task_context_buffer;
  76. if (sgl_pair_index == 0) {
  77. return &task_context->sgl_pair_ab;
  78. } else if (sgl_pair_index == 1) {
  79. return &task_context->sgl_pair_cd;
  80. }
  81. return &sci_req->sg_table[sgl_pair_index - 2];
  82. }
  83. /**
  84. * This function will build the SGL list for an IO request.
  85. * @sci_req: This parameter specifies the IO request for which to build
  86. * the Scatter-Gather List.
  87. *
  88. */
  89. void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
  90. {
  91. struct isci_request *isci_request = sci_req_to_ireq(sds_request);
  92. struct isci_host *isci_host = isci_request->isci_host;
  93. struct sas_task *task = isci_request_access_task(isci_request);
  94. struct scatterlist *sg = NULL;
  95. dma_addr_t dma_addr;
  96. u32 sg_idx = 0;
  97. struct scu_sgl_element_pair *scu_sg = NULL;
  98. struct scu_sgl_element_pair *prev_sg = NULL;
  99. if (task->num_scatter > 0) {
  100. sg = task->scatter;
  101. while (sg) {
  102. scu_sg = scic_sds_request_get_sgl_element_pair(
  103. sds_request,
  104. sg_idx);
  105. SCU_SGL_COPY(scu_sg->A, sg);
  106. sg = sg_next(sg);
  107. if (sg) {
  108. SCU_SGL_COPY(scu_sg->B, sg);
  109. sg = sg_next(sg);
  110. } else
  111. SCU_SGL_ZERO(scu_sg->B);
  112. if (prev_sg) {
  113. dma_addr =
  114. scic_io_request_get_dma_addr(
  115. sds_request,
  116. scu_sg);
  117. prev_sg->next_pair_upper =
  118. upper_32_bits(dma_addr);
  119. prev_sg->next_pair_lower =
  120. lower_32_bits(dma_addr);
  121. }
  122. prev_sg = scu_sg;
  123. sg_idx++;
  124. }
  125. } else { /* handle when no sg */
  126. scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
  127. sg_idx);
  128. dma_addr = dma_map_single(&isci_host->pdev->dev,
  129. task->scatter,
  130. task->total_xfer_len,
  131. task->data_dir);
  132. isci_request->zero_scatter_daddr = dma_addr;
  133. scu_sg->A.length = task->total_xfer_len;
  134. scu_sg->A.address_upper = upper_32_bits(dma_addr);
  135. scu_sg->A.address_lower = lower_32_bits(dma_addr);
  136. }
  137. if (scu_sg) {
  138. scu_sg->next_pair_upper = 0;
  139. scu_sg->next_pair_lower = 0;
  140. }
  141. }
  142. static void scic_sds_ssp_io_request_assign_buffers(struct scic_sds_request *sci_req)
  143. {
  144. if (sci_req->was_tag_assigned_by_user == false)
  145. sci_req->task_context_buffer = &sci_req->tc;
  146. }
  147. static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
  148. {
  149. struct ssp_cmd_iu *cmd_iu;
  150. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  151. struct sas_task *task = isci_request_access_task(ireq);
  152. cmd_iu = &sci_req->ssp.cmd;
  153. memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
  154. cmd_iu->add_cdb_len = 0;
  155. cmd_iu->_r_a = 0;
  156. cmd_iu->_r_b = 0;
  157. cmd_iu->en_fburst = 0; /* unsupported */
  158. cmd_iu->task_prio = task->ssp_task.task_prio;
  159. cmd_iu->task_attr = task->ssp_task.task_attr;
  160. cmd_iu->_r_c = 0;
  161. sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
  162. sizeof(task->ssp_task.cdb) / sizeof(u32));
  163. }
  164. static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
  165. {
  166. struct ssp_task_iu *task_iu;
  167. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  168. struct sas_task *task = isci_request_access_task(ireq);
  169. struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
  170. task_iu = &sci_req->ssp.tmf;
  171. memset(task_iu, 0, sizeof(struct ssp_task_iu));
  172. memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
  173. task_iu->task_func = isci_tmf->tmf_code;
  174. task_iu->task_tag =
  175. (ireq->ttype == tmf_task) ?
  176. isci_tmf->io_tag :
  177. SCI_CONTROLLER_INVALID_IO_TAG;
  178. }
  179. /**
  180. * This method is will fill in the SCU Task Context for any type of SSP request.
  181. * @sci_req:
  182. * @task_context:
  183. *
  184. */
  185. static void scu_ssp_reqeust_construct_task_context(
  186. struct scic_sds_request *sds_request,
  187. struct scu_task_context *task_context)
  188. {
  189. dma_addr_t dma_addr;
  190. struct scic_sds_controller *controller;
  191. struct scic_sds_remote_device *target_device;
  192. struct scic_sds_port *target_port;
  193. controller = scic_sds_request_get_controller(sds_request);
  194. target_device = scic_sds_request_get_device(sds_request);
  195. target_port = scic_sds_request_get_port(sds_request);
  196. /* Fill in the TC with the its required data */
  197. task_context->abort = 0;
  198. task_context->priority = 0;
  199. task_context->initiator_request = 1;
  200. task_context->connection_rate = target_device->connection_rate;
  201. task_context->protocol_engine_index =
  202. scic_sds_controller_get_protocol_engine_group(controller);
  203. task_context->logical_port_index =
  204. scic_sds_port_get_index(target_port);
  205. task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
  206. task_context->valid = SCU_TASK_CONTEXT_VALID;
  207. task_context->context_type = SCU_TASK_CONTEXT_TYPE;
  208. task_context->remote_node_index =
  209. scic_sds_remote_device_get_index(sds_request->target_device);
  210. task_context->command_code = 0;
  211. task_context->link_layer_control = 0;
  212. task_context->do_not_dma_ssp_good_response = 1;
  213. task_context->strict_ordering = 0;
  214. task_context->control_frame = 0;
  215. task_context->timeout_enable = 0;
  216. task_context->block_guard_enable = 0;
  217. task_context->address_modifier = 0;
  218. /* task_context->type.ssp.tag = sci_req->io_tag; */
  219. task_context->task_phase = 0x01;
  220. if (sds_request->was_tag_assigned_by_user) {
  221. /*
  222. * Build the task context now since we have already read
  223. * the data
  224. */
  225. sds_request->post_context =
  226. (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
  227. (scic_sds_controller_get_protocol_engine_group(
  228. controller) <<
  229. SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  230. (scic_sds_port_get_index(target_port) <<
  231. SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
  232. scic_sds_io_tag_get_index(sds_request->io_tag));
  233. } else {
  234. /*
  235. * Build the task context now since we have already read
  236. * the data
  237. *
  238. * I/O tag index is not assigned because we have to wait
  239. * until we get a TCi
  240. */
  241. sds_request->post_context =
  242. (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
  243. (scic_sds_controller_get_protocol_engine_group(
  244. owning_controller) <<
  245. SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  246. (scic_sds_port_get_index(target_port) <<
  247. SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
  248. }
  249. /*
  250. * Copy the physical address for the command buffer to the
  251. * SCU Task Context
  252. */
  253. dma_addr = scic_io_request_get_dma_addr(sds_request,
  254. &sds_request->ssp.cmd);
  255. task_context->command_iu_upper = upper_32_bits(dma_addr);
  256. task_context->command_iu_lower = lower_32_bits(dma_addr);
  257. /*
  258. * Copy the physical address for the response buffer to the
  259. * SCU Task Context
  260. */
  261. dma_addr = scic_io_request_get_dma_addr(sds_request,
  262. &sds_request->ssp.rsp);
  263. task_context->response_iu_upper = upper_32_bits(dma_addr);
  264. task_context->response_iu_lower = lower_32_bits(dma_addr);
  265. }
  266. /**
  267. * This method is will fill in the SCU Task Context for a SSP IO request.
  268. * @sci_req:
  269. *
  270. */
  271. static void scu_ssp_io_request_construct_task_context(
  272. struct scic_sds_request *sci_req,
  273. enum dma_data_direction dir,
  274. u32 len)
  275. {
  276. struct scu_task_context *task_context;
  277. task_context = scic_sds_request_get_task_context(sci_req);
  278. scu_ssp_reqeust_construct_task_context(sci_req, task_context);
  279. task_context->ssp_command_iu_length =
  280. sizeof(struct ssp_cmd_iu) / sizeof(u32);
  281. task_context->type.ssp.frame_type = SSP_COMMAND;
  282. switch (dir) {
  283. case DMA_FROM_DEVICE:
  284. case DMA_NONE:
  285. default:
  286. task_context->task_type = SCU_TASK_TYPE_IOREAD;
  287. break;
  288. case DMA_TO_DEVICE:
  289. task_context->task_type = SCU_TASK_TYPE_IOWRITE;
  290. break;
  291. }
  292. task_context->transfer_length_bytes = len;
  293. if (task_context->transfer_length_bytes > 0)
  294. scic_sds_request_build_sgl(sci_req);
  295. }
  296. static void scic_sds_ssp_task_request_assign_buffers(struct scic_sds_request *sci_req)
  297. {
  298. if (sci_req->was_tag_assigned_by_user == false)
  299. sci_req->task_context_buffer = &sci_req->tc;
  300. }
  301. /**
  302. * This method will fill in the SCU Task Context for a SSP Task request. The
  303. * following important settings are utilized: -# priority ==
  304. * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
  305. * ahead of other task destined for the same Remote Node. -# task_type ==
  306. * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
  307. * (i.e. non-raw frame) is being utilized to perform task management. -#
  308. * control_frame == 1. This ensures that the proper endianess is set so
  309. * that the bytes are transmitted in the right order for a task frame.
  310. * @sci_req: This parameter specifies the task request object being
  311. * constructed.
  312. *
  313. */
  314. static void scu_ssp_task_request_construct_task_context(
  315. struct scic_sds_request *sci_req)
  316. {
  317. struct scu_task_context *task_context;
  318. task_context = scic_sds_request_get_task_context(sci_req);
  319. scu_ssp_reqeust_construct_task_context(sci_req, task_context);
  320. task_context->control_frame = 1;
  321. task_context->priority = SCU_TASK_PRIORITY_HIGH;
  322. task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
  323. task_context->transfer_length_bytes = 0;
  324. task_context->type.ssp.frame_type = SSP_TASK;
  325. task_context->ssp_command_iu_length =
  326. sizeof(struct ssp_task_iu) / sizeof(u32);
  327. }
  328. /**
  329. * This method constructs the SSP Command IU data for this ssp passthrough
  330. * command request object.
  331. * @sci_req: This parameter specifies the request object for which the SSP
  332. * command information unit is being built.
  333. *
  334. * enum sci_status, returns invalid parameter is cdb > 16
  335. */
  336. /**
  337. * This method constructs the SATA request object.
  338. * @sci_req:
  339. * @sat_protocol:
  340. * @transfer_length:
  341. * @data_direction:
  342. * @copy_rx_frame:
  343. *
  344. * enum sci_status
  345. */
  346. static enum sci_status
  347. scic_io_request_construct_sata(struct scic_sds_request *sci_req,
  348. u32 len,
  349. enum dma_data_direction dir,
  350. bool copy)
  351. {
  352. enum sci_status status = SCI_SUCCESS;
  353. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  354. struct sas_task *task = isci_request_access_task(ireq);
  355. /* check for management protocols */
  356. if (ireq->ttype == tmf_task) {
  357. struct isci_tmf *tmf = isci_request_access_tmf(ireq);
  358. if (tmf->tmf_code == isci_tmf_sata_srst_high ||
  359. tmf->tmf_code == isci_tmf_sata_srst_low)
  360. return scic_sds_stp_soft_reset_request_construct(sci_req);
  361. else {
  362. dev_err(scic_to_dev(sci_req->owning_controller),
  363. "%s: Request 0x%p received un-handled SAT "
  364. "management protocol 0x%x.\n",
  365. __func__, sci_req, tmf->tmf_code);
  366. return SCI_FAILURE;
  367. }
  368. }
  369. if (!sas_protocol_ata(task->task_proto)) {
  370. dev_err(scic_to_dev(sci_req->owning_controller),
  371. "%s: Non-ATA protocol in SATA path: 0x%x\n",
  372. __func__,
  373. task->task_proto);
  374. return SCI_FAILURE;
  375. }
  376. /* non data */
  377. if (task->data_dir == DMA_NONE)
  378. return scic_sds_stp_non_data_request_construct(sci_req);
  379. /* NCQ */
  380. if (task->ata_task.use_ncq)
  381. return scic_sds_stp_ncq_request_construct(sci_req, len, dir);
  382. /* DMA */
  383. if (task->ata_task.dma_xfer)
  384. return scic_sds_stp_udma_request_construct(sci_req, len, dir);
  385. else /* PIO */
  386. return scic_sds_stp_pio_request_construct(sci_req, copy);
  387. return status;
  388. }
  389. static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
  390. {
  391. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  392. struct sas_task *task = isci_request_access_task(ireq);
  393. sci_req->protocol = SCIC_SSP_PROTOCOL;
  394. scu_ssp_io_request_construct_task_context(sci_req,
  395. task->data_dir,
  396. task->total_xfer_len);
  397. scic_sds_io_request_build_ssp_command_iu(sci_req);
  398. sci_base_state_machine_change_state(
  399. &sci_req->state_machine,
  400. SCI_BASE_REQUEST_STATE_CONSTRUCTED);
  401. return SCI_SUCCESS;
  402. }
  403. enum sci_status scic_task_request_construct_ssp(
  404. struct scic_sds_request *sci_req)
  405. {
  406. /* Construct the SSP Task SCU Task Context */
  407. scu_ssp_task_request_construct_task_context(sci_req);
  408. /* Fill in the SSP Task IU */
  409. scic_sds_task_request_build_ssp_task_iu(sci_req);
  410. sci_base_state_machine_change_state(&sci_req->state_machine,
  411. SCI_BASE_REQUEST_STATE_CONSTRUCTED);
  412. return SCI_SUCCESS;
  413. }
  414. static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
  415. {
  416. enum sci_status status;
  417. struct scic_sds_stp_request *stp_req;
  418. bool copy = false;
  419. struct isci_request *isci_request = sci_req_to_ireq(sci_req);
  420. struct sas_task *task = isci_request_access_task(isci_request);
  421. stp_req = &sci_req->stp.req;
  422. sci_req->protocol = SCIC_STP_PROTOCOL;
  423. copy = (task->data_dir == DMA_NONE) ? false : true;
  424. status = scic_io_request_construct_sata(sci_req,
  425. task->total_xfer_len,
  426. task->data_dir,
  427. copy);
  428. if (status == SCI_SUCCESS)
  429. sci_base_state_machine_change_state(&sci_req->state_machine,
  430. SCI_BASE_REQUEST_STATE_CONSTRUCTED);
  431. return status;
  432. }
  433. enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
  434. {
  435. enum sci_status status = SCI_SUCCESS;
  436. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  437. /* check for management protocols */
  438. if (ireq->ttype == tmf_task) {
  439. struct isci_tmf *tmf = isci_request_access_tmf(ireq);
  440. if (tmf->tmf_code == isci_tmf_sata_srst_high ||
  441. tmf->tmf_code == isci_tmf_sata_srst_low) {
  442. status = scic_sds_stp_soft_reset_request_construct(sci_req);
  443. } else {
  444. dev_err(scic_to_dev(sci_req->owning_controller),
  445. "%s: Request 0x%p received un-handled SAT "
  446. "Protocol 0x%x.\n",
  447. __func__, sci_req, tmf->tmf_code);
  448. return SCI_FAILURE;
  449. }
  450. }
  451. if (status == SCI_SUCCESS)
  452. sci_base_state_machine_change_state(
  453. &sci_req->state_machine,
  454. SCI_BASE_REQUEST_STATE_CONSTRUCTED);
  455. return status;
  456. }
  457. /**
  458. * sci_req_tx_bytes - bytes transferred when reply underruns request
  459. * @sci_req: request that was terminated early
  460. */
  461. #define SCU_TASK_CONTEXT_SRAM 0x200000
  462. static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
  463. {
  464. struct scic_sds_controller *scic = sci_req->owning_controller;
  465. u32 ret_val = 0;
  466. if (readl(&scic->smu_registers->address_modifier) == 0) {
  467. void __iomem *scu_reg_base = scic->scu_registers;
  468. /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
  469. * BAR1 is the scu_registers
  470. * 0x20002C = 0x200000 + 0x2c
  471. * = start of task context SRAM + offset of (type.ssp.data_offset)
  472. * TCi is the io_tag of struct scic_sds_request
  473. */
  474. ret_val = readl(scu_reg_base +
  475. (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
  476. ((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
  477. }
  478. return ret_val;
  479. }
  480. enum sci_status
  481. scic_sds_request_start(struct scic_sds_request *request)
  482. {
  483. if (request->device_sequence !=
  484. scic_sds_remote_device_get_sequence(request->target_device))
  485. return SCI_FAILURE;
  486. if (request->state_handlers->start_handler)
  487. return request->state_handlers->start_handler(request);
  488. dev_warn(scic_to_dev(request->owning_controller),
  489. "%s: SCIC IO Request requested to start while in wrong "
  490. "state %d\n",
  491. __func__,
  492. sci_base_state_machine_get_state(&request->state_machine));
  493. return SCI_FAILURE_INVALID_STATE;
  494. }
  495. enum sci_status
  496. scic_sds_io_request_terminate(struct scic_sds_request *request)
  497. {
  498. if (request->state_handlers->abort_handler)
  499. return request->state_handlers->abort_handler(request);
  500. dev_warn(scic_to_dev(request->owning_controller),
  501. "%s: SCIC IO Request requested to abort while in wrong "
  502. "state %d\n",
  503. __func__,
  504. sci_base_state_machine_get_state(&request->state_machine));
  505. return SCI_FAILURE_INVALID_STATE;
  506. }
  507. enum sci_status scic_sds_io_request_event_handler(
  508. struct scic_sds_request *request,
  509. u32 event_code)
  510. {
  511. if (request->state_handlers->event_handler)
  512. return request->state_handlers->event_handler(request, event_code);
  513. dev_warn(scic_to_dev(request->owning_controller),
  514. "%s: SCIC IO Request given event code notification %x while "
  515. "in wrong state %d\n",
  516. __func__,
  517. event_code,
  518. sci_base_state_machine_get_state(&request->state_machine));
  519. return SCI_FAILURE_INVALID_STATE;
  520. }
  521. /**
  522. *
  523. * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start
  524. * operation is to be executed.
  525. * @frame_index: The frame index returned by the hardware for the reqeust
  526. * object.
  527. *
  528. * This method invokes the core state frame handler for the
  529. * SCIC_SDS_IO_REQUEST_T object. enum sci_status
  530. */
  531. enum sci_status scic_sds_io_request_frame_handler(
  532. struct scic_sds_request *request,
  533. u32 frame_index)
  534. {
  535. if (request->state_handlers->frame_handler)
  536. return request->state_handlers->frame_handler(request, frame_index);
  537. dev_warn(scic_to_dev(request->owning_controller),
  538. "%s: SCIC IO Request given unexpected frame %x while in "
  539. "state %d\n",
  540. __func__,
  541. frame_index,
  542. sci_base_state_machine_get_state(&request->state_machine));
  543. scic_sds_controller_release_frame(request->owning_controller, frame_index);
  544. return SCI_FAILURE_INVALID_STATE;
  545. }
  546. /*
  547. * This function copies response data for requests returning response data
  548. * instead of sense data.
  549. * @sci_req: This parameter specifies the request object for which to copy
  550. * the response data.
  551. */
  552. void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
  553. {
  554. void *resp_buf;
  555. u32 len;
  556. struct ssp_response_iu *ssp_response;
  557. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  558. struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
  559. ssp_response = &sci_req->ssp.rsp;
  560. resp_buf = &isci_tmf->resp.resp_iu;
  561. len = min_t(u32,
  562. SSP_RESP_IU_MAX_SIZE,
  563. be32_to_cpu(ssp_response->response_data_len));
  564. memcpy(resp_buf, ssp_response->resp_data, len);
  565. }
  566. /*
  567. * This method implements the action taken when a constructed
  568. * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
  569. * This method will, if necessary, allocate a TCi for the io request object and
  570. * then will, if necessary, copy the constructed TC data into the actual TC
  571. * buffer. If everything is successful the post context field is updated with
  572. * the TCi so the controller can post the request to the hardware. enum sci_status
  573. * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
  574. */
  575. static enum sci_status scic_sds_request_constructed_state_start_handler(
  576. struct scic_sds_request *request)
  577. {
  578. struct scu_task_context *task_context;
  579. if (request->io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
  580. request->io_tag =
  581. scic_controller_allocate_io_tag(request->owning_controller);
  582. }
  583. /* Record the IO Tag in the request */
  584. if (request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
  585. task_context = request->task_context_buffer;
  586. task_context->task_index = scic_sds_io_tag_get_index(request->io_tag);
  587. switch (task_context->protocol_type) {
  588. case SCU_TASK_CONTEXT_PROTOCOL_SMP:
  589. case SCU_TASK_CONTEXT_PROTOCOL_SSP:
  590. /* SSP/SMP Frame */
  591. task_context->type.ssp.tag = request->io_tag;
  592. task_context->type.ssp.target_port_transfer_tag = 0xFFFF;
  593. break;
  594. case SCU_TASK_CONTEXT_PROTOCOL_STP:
  595. /*
  596. * STP/SATA Frame
  597. * task_context->type.stp.ncq_tag = request->ncq_tag; */
  598. break;
  599. case SCU_TASK_CONTEXT_PROTOCOL_NONE:
  600. /* / @todo When do we set no protocol type? */
  601. break;
  602. default:
  603. /* This should never happen since we build the IO requests */
  604. break;
  605. }
  606. /*
  607. * Check to see if we need to copy the task context buffer
  608. * or have been building into the task context buffer */
  609. if (request->was_tag_assigned_by_user == false) {
  610. scic_sds_controller_copy_task_context(
  611. request->owning_controller, request);
  612. }
  613. /* Add to the post_context the io tag value */
  614. request->post_context |= scic_sds_io_tag_get_index(request->io_tag);
  615. /* Everything is good go ahead and change state */
  616. sci_base_state_machine_change_state(&request->state_machine,
  617. SCI_BASE_REQUEST_STATE_STARTED);
  618. return SCI_SUCCESS;
  619. }
  620. return SCI_FAILURE_INSUFFICIENT_RESOURCES;
  621. }
  622. /*
  623. * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
  624. * object receives a scic_sds_request_terminate() request. Since the request
  625. * has not yet been posted to the hardware the request transitions to the
  626. * completed state. enum sci_status SCI_SUCCESS
  627. */
static enum sci_status scic_sds_request_constructed_state_abort_handler(
	struct scic_sds_request *request)
{
	/*
	 * This request has been terminated by the user make sure that the correct
	 * status code is returned */
	scic_sds_request_set_status(request,
				    SCU_TASK_DONE_TASK_ABORT,
				    SCI_FAILURE_IO_TERMINATED);

	/* The request was never posted to the hardware, so it can go straight
	 * to the completed state rather than through the aborting state. */
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	return SCI_SUCCESS;
}
  641. /*
  642. * *****************************************************************************
  643. * * STARTED STATE HANDLERS
  644. * ***************************************************************************** */
  645. /*
  646. * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
  647. * object receives a scic_sds_request_terminate() request. Since the request
  648. * has been posted to the hardware the io request state is changed to the
  649. * aborting state. enum sci_status SCI_SUCCESS
  650. */
enum sci_status scic_sds_request_started_state_abort_handler(
	struct scic_sds_request *request)
{
	/* Stop any protocol-specific substate machine first so it does not
	 * act on further notifications for this request. */
	if (request->has_started_substate_machine)
		sci_base_state_machine_stop(&request->started_substate_machine);

	/* The request is already posted to the hardware, so it must pass
	 * through the aborting state instead of completing immediately. */
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_ABORTING);

	return SCI_SUCCESS;
}
  660. /*
  661. * scic_sds_request_started_state_tc_completion_handler() - This method process
  662. * TC (task context) completions for normal IO request (i.e. Task/Abort
  663. * Completions of type 0). This method will update the
  664. * SCIC_SDS_IO_REQUEST_T::status field.
  665. * @sci_req: This parameter specifies the request for which a completion
  666. * occurred.
  667. * @completion_code: This parameter specifies the completion code received from
  668. * the SCU.
  669. *
  670. */
static enum sci_status
scic_sds_request_started_state_tc_completion_handler(struct scic_sds_request *sci_req,
						     u32 completion_code)
{
	u8 datapres;
	struct ssp_response_iu *resp_iu;

	/*
	 * TODO: Any SDMA return code of other than 0 is bad
	 *       decode 0x003C0000 to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP):
	{
		/*
		 * There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.). We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		/* Byte-swap the response IU in place so the status byte
		 * below can be examined in CPU order. */
		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			/* Good status: the early completion is benign. */
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_GOOD,
				SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			/* Non-zero status: surface the response IU for the
			 * upper layer to examine. */
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		}
	}
	break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE):
	{
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		/* Byte-swap the response IU in place for later decoding. */
		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/*
		 * @todo With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;

		/* datapres 0x01/0x02 — presumably the response-data/sense-data
		 * present encodings of the SSP response IU; treat the IO as
		 * having a valid response to examine.  TODO confirm against
		 * the SSP IU definition. */
		if ((datapres == 0x01) || (datapres == 0x02)) {
			scic_sds_request_set_status(
				sci_req,
				SCU_TASK_DONE_CHECK_RESPONSE,
				SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(
				sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
		break;

	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		/* STP devices require a reset after these errors; other
		 * protocols report a controller-specific IO error. */
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(
				sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(
		&sci_req->state_machine,
		SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}
  819. enum sci_status
  820. scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code)
  821. {
  822. if (request->state_machine.current_state_id == SCI_BASE_REQUEST_STATE_STARTED &&
  823. request->has_started_substate_machine == false)
  824. return scic_sds_request_started_state_tc_completion_handler(request, completion_code);
  825. else if (request->state_handlers->tc_completion_handler)
  826. return request->state_handlers->tc_completion_handler(request, completion_code);
  827. dev_warn(scic_to_dev(request->owning_controller),
  828. "%s: SCIC IO Request given task completion notification %x "
  829. "while in wrong state %d\n",
  830. __func__,
  831. completion_code,
  832. sci_base_state_machine_get_state(&request->state_machine));
  833. return SCI_FAILURE_INVALID_STATE;
  834. }
  835. /*
  836. * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
  837. * object receives a scic_sds_request_frame_handler() request. This method
  838. * first determines the frame type received. If this is a response frame then
  839. * the response data is copied to the io request response buffer for processing
  840. * at completion time. If the frame type is not a response buffer an error is
  841. * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
  842. */
  843. static enum sci_status
  844. scic_sds_request_started_state_frame_handler(struct scic_sds_request *sci_req,
  845. u32 frame_index)
  846. {
  847. enum sci_status status;
  848. u32 *frame_header;
  849. struct ssp_frame_hdr ssp_hdr;
  850. ssize_t word_cnt;
  851. status = scic_sds_unsolicited_frame_control_get_header(
  852. &(scic_sds_request_get_controller(sci_req)->uf_control),
  853. frame_index,
  854. (void **)&frame_header);
  855. word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
  856. sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
  857. if (ssp_hdr.frame_type == SSP_RESPONSE) {
  858. struct ssp_response_iu *resp_iu;
  859. ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
  860. status = scic_sds_unsolicited_frame_control_get_buffer(
  861. &(scic_sds_request_get_controller(sci_req)->uf_control),
  862. frame_index,
  863. (void **)&resp_iu);
  864. sci_swab32_cpy(&sci_req->ssp.rsp,
  865. resp_iu, word_cnt);
  866. resp_iu = &sci_req->ssp.rsp;
  867. if ((resp_iu->datapres == 0x01) ||
  868. (resp_iu->datapres == 0x02)) {
  869. scic_sds_request_set_status(
  870. sci_req,
  871. SCU_TASK_DONE_CHECK_RESPONSE,
  872. SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
  873. } else
  874. scic_sds_request_set_status(
  875. sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
  876. } else {
  877. /* This was not a response frame why did it get forwarded? */
  878. dev_err(scic_to_dev(sci_req->owning_controller),
  879. "%s: SCIC IO Request 0x%p received unexpected "
  880. "frame %d type 0x%02x\n",
  881. __func__,
  882. sci_req,
  883. frame_index,
  884. ssp_hdr.frame_type);
  885. }
  886. /*
  887. * In any case we are done with this frame buffer return it to the
  888. * controller
  889. */
  890. scic_sds_controller_release_frame(
  891. sci_req->owning_controller, frame_index);
  892. return SCI_SUCCESS;
  893. }
  894. /*
  895. * *****************************************************************************
  896. * * COMPLETED STATE HANDLERS
  897. * ***************************************************************************** */
  898. /*
  899. * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
  900. * object receives a scic_sds_request_complete() request. This method frees up
  901. * any io request resources that have been allocated and transitions the
  902. * request to its final state. Consider stopping the state machine instead of
  903. * transitioning to the final state? enum sci_status SCI_SUCCESS
  904. */
  905. static enum sci_status scic_sds_request_completed_state_complete_handler(
  906. struct scic_sds_request *request)
  907. {
  908. if (request->was_tag_assigned_by_user != true) {
  909. scic_controller_free_io_tag(
  910. request->owning_controller, request->io_tag);
  911. }
  912. if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
  913. scic_sds_controller_release_frame(
  914. request->owning_controller, request->saved_rx_frame_index);
  915. }
  916. sci_base_state_machine_change_state(&request->state_machine,
  917. SCI_BASE_REQUEST_STATE_FINAL);
  918. return SCI_SUCCESS;
  919. }
  920. /*
  921. * *****************************************************************************
  922. * * ABORTING STATE HANDLERS
  923. * ***************************************************************************** */
  924. /*
  925. * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
  926. * object receives a scic_sds_request_terminate() request. This method is the
  927. * io request aborting state abort handlers. On receipt of a multiple
  928. * terminate requests the io request will transition to the completed state.
  929. * This should not happen in normal operation. enum sci_status SCI_SUCCESS
  930. */
static enum sci_status scic_sds_request_aborting_state_abort_handler(
	struct scic_sds_request *request)
{
	/* A second terminate request while already aborting simply forces the
	 * request into the completed state; this is not expected in normal
	 * operation. */
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}
  938. /*
  939. * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
  940. * object receives a scic_sds_request_task_completion() request. This method
  941. * decodes the completion type waiting for the abort task complete
  942. * notification. When the abort task complete is received the io request
  943. * transitions to the completed state. enum sci_status SCI_SUCCESS
  944. */
  945. static enum sci_status scic_sds_request_aborting_state_tc_completion_handler(
  946. struct scic_sds_request *sci_req,
  947. u32 completion_code)
  948. {
  949. switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
  950. case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
  951. case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
  952. scic_sds_request_set_status(
  953. sci_req, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED
  954. );
  955. sci_base_state_machine_change_state(&sci_req->state_machine,
  956. SCI_BASE_REQUEST_STATE_COMPLETED);
  957. break;
  958. default:
  959. /*
  960. * Unless we get some strange error wait for the task abort to complete
  961. * TODO: Should there be a state change for this completion? */
  962. break;
  963. }
  964. return SCI_SUCCESS;
  965. }
  966. /*
  967. * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
  968. * object receives a scic_sds_request_frame_handler() request. This method
  969. * discards the unsolicited frame since we are waiting for the abort task
  970. * completion. enum sci_status SCI_SUCCESS
  971. */
static enum sci_status scic_sds_request_aborting_state_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	/* TODO: Is it even possible to get an unsolicited frame in the aborting state? */

	/* We are only waiting for the abort-task completion, so the frame is
	 * discarded and its buffer returned to the controller. */
	scic_sds_controller_release_frame(
		sci_req->owning_controller, frame_index);

	return SCI_SUCCESS;
}
/*
 * Per-state handler dispatch table for the base IO request state machine,
 * indexed by the SCI_BASE_REQUEST_STATE_* constants.  A state with no
 * handler for a given operation causes the corresponding dispatcher
 * (event/frame/tc-completion) to log a warning and return
 * SCI_FAILURE_INVALID_STATE.
 */
static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
		/* no operations are valid before construction completes */
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.start_handler = scic_sds_request_constructed_state_start_handler,
		.abort_handler = scic_sds_request_constructed_state_abort_handler,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_request_started_state_tc_completion_handler,
		.frame_handler = scic_sds_request_started_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.complete_handler = scic_sds_request_completed_state_complete_handler,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.abort_handler = scic_sds_request_aborting_state_abort_handler,
		.tc_completion_handler = scic_sds_request_aborting_state_tc_completion_handler,
		.frame_handler = scic_sds_request_aborting_state_frame_handler,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
		/* terminal state: no further operations are accepted */
	},
};
  1004. /**
  1005. * isci_request_process_response_iu() - This function sets the status and
  1006. * response iu, in the task struct, from the request object for the upper
  1007. * layer driver.
  1008. * @sas_task: This parameter is the task struct from the upper layer driver.
  1009. * @resp_iu: This parameter points to the response iu of the completed request.
  1010. * @dev: This parameter specifies the linux device struct.
  1011. *
  1012. * none.
  1013. */
  1014. static void isci_request_process_response_iu(
  1015. struct sas_task *task,
  1016. struct ssp_response_iu *resp_iu,
  1017. struct device *dev)
  1018. {
  1019. dev_dbg(dev,
  1020. "%s: resp_iu = %p "
  1021. "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
  1022. "resp_iu->response_data_len = %x, "
  1023. "resp_iu->sense_data_len = %x\nrepsonse data: ",
  1024. __func__,
  1025. resp_iu,
  1026. resp_iu->status,
  1027. resp_iu->datapres,
  1028. resp_iu->response_data_len,
  1029. resp_iu->sense_data_len);
  1030. task->task_status.stat = resp_iu->status;
  1031. /* libsas updates the task status fields based on the response iu. */
  1032. sas_ssp_task_response(dev, task, resp_iu);
  1033. }
  1034. /**
  1035. * isci_request_set_open_reject_status() - This function prepares the I/O
  1036. * completion for OPEN_REJECT conditions.
  1037. * @request: This parameter is the completed isci_request object.
  1038. * @response_ptr: This parameter specifies the service response for the I/O.
  1039. * @status_ptr: This parameter specifies the exec status for the I/O.
  1040. * @complete_to_host_ptr: This parameter specifies the action to be taken by
  1041. * the LLDD with respect to completing this request or forcing an abort
  1042. * condition on the I/O.
  1043. * @open_rej_reason: This parameter specifies the encoded reason for the
  1044. * abandon-class reject.
  1045. *
  1046. * none.
  1047. */
  1048. static void isci_request_set_open_reject_status(
  1049. struct isci_request *request,
  1050. struct sas_task *task,
  1051. enum service_response *response_ptr,
  1052. enum exec_status *status_ptr,
  1053. enum isci_completion_selection *complete_to_host_ptr,
  1054. enum sas_open_rej_reason open_rej_reason)
  1055. {
  1056. /* Task in the target is done. */
  1057. request->complete_in_target = true;
  1058. *response_ptr = SAS_TASK_UNDELIVERED;
  1059. *status_ptr = SAS_OPEN_REJECT;
  1060. *complete_to_host_ptr = isci_perform_normal_io_completion;
  1061. task->task_status.open_rej_reason = open_rej_reason;
  1062. }
  1063. /**
  1064. * isci_request_handle_controller_specific_errors() - This function decodes
  1065. * controller-specific I/O completion error conditions.
  1066. * @request: This parameter is the completed isci_request object.
  1067. * @response_ptr: This parameter specifies the service response for the I/O.
  1068. * @status_ptr: This parameter specifies the exec status for the I/O.
  1069. * @complete_to_host_ptr: This parameter specifies the action to be taken by
  1070. * the LLDD with respect to completing this request or forcing an abort
  1071. * condition on the I/O.
  1072. *
  1073. * none.
  1074. */
static void isci_request_handle_controller_specific_errors(
	struct isci_remote_device *isci_device,
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr)
{
	unsigned int cstatus;

	cstatus = request->sci.scu_status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
		"- controller status = 0x%x\n",
		__func__, request, cstatus);

	/* Decode the controller-specific errors; most
	 * important is to recognize those conditions in which
	 * the target may still have a task outstanding that
	 * must be aborted.
	 *
	 * Note that there are SCU completion codes being
	 * named in the decode below for which SCIC has already
	 * done work to handle them in a way other than as
	 * a controller-specific completion code; these are left
	 * in the decode below for completeness sake.
	 */
	switch (cstatus) {
	case SCU_TASK_DONE_DMASETUP_DIRERR:
	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
	case SCU_TASK_DONE_XFERCNT_ERR:
		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
		if (task->task_proto == SAS_PROTOCOL_SMP) {
			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
			*response_ptr = SAS_TASK_COMPLETE;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if ((isci_device->status == isci_stopping) ||
			    (isci_device->status == isci_stopped))
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAS_ABORTED_TASK;

			/* SMP: no cleanup needed in the target. */
			request->complete_in_target = true;

			*complete_to_host_ptr =
				isci_perform_normal_io_completion;
		} else {
			/* Task in the target is not done. */
			*response_ptr = SAS_TASK_UNDELIVERED;

			if ((isci_device->status == isci_stopping) ||
			    (isci_device->status == isci_stopped))
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAM_STAT_TASK_ABORTED;

			/* The outstanding task must still be aborted. */
			request->complete_in_target = false;

			*complete_to_host_ptr =
				isci_perform_error_io_completion;
		}

		break;

	case SCU_TASK_DONE_CRC_ERR:
	case SCU_TASK_DONE_NAK_CMD_ERR:
	case SCU_TASK_DONE_EXCESS_DATA:
	case SCU_TASK_DONE_UNEXP_FIS:
	/* Also SCU_TASK_DONE_UNEXP_RESP: */
	case SCU_TASK_DONE_VIIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_IIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_RNCNV_OUTBOUND:	/* TODO - conditions? */
		/* These are conditions in which the target
		 * has completed the task, so that no cleanup
		 * is necessary.
		 */
		*response_ptr = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if ((isci_device->status == isci_stopping) ||
		    (isci_device->status == isci_stopped))
			*status_ptr = SAS_DEVICE_UNKNOWN;
		else
			*status_ptr = SAS_ABORTED_TASK;

		request->complete_in_target = true;

		*complete_to_host_ptr = isci_perform_normal_io_completion;
		break;

	/* Note that the only open reject completion codes seen here will be
	 * abandon-class codes; all others are automatically retried in the SCU.
	 */
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
		/* Note - the return of AB0 will change when
		 * libsas implements detection of zone violations.
		 */
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
		break;

	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_STP_NORES);
		break;

	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_EPROTO);
		break;

	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
		break;

	case SCU_TASK_DONE_LL_R_ERR:
	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	/* Also SCU_TASK_DONE_NAK_ERR:*/
	case SCU_TASK_DONE_LL_LF_TERM:
	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
	case SCU_TASK_DONE_LL_ABORT_ERR:
	case SCU_TASK_DONE_SEQ_INV_TYPE:
	/* Also SCU_TASK_DONE_UNEXP_XR: */
	case SCU_TASK_DONE_XR_IU_LEN_ERR:
	case SCU_TASK_DONE_INV_FIS_LEN:
	/* Also SCU_TASK_DONE_XR_WD_LEN: */
	case SCU_TASK_DONE_SDMA_ERR:
	case SCU_TASK_DONE_OFFSET_ERR:
	case SCU_TASK_DONE_MAX_PLD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_SMP_RESP_TO_ERR:	/* Escalate to dev reset? */
	case SCU_TASK_DONE_SMP_LL_RX_ERR:
	case SCU_TASK_DONE_UNEXP_DATA:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_DONE_TASK_ABORT:
	default:
		/* Task in the target is not done. */
		*response_ptr = SAS_TASK_UNDELIVERED;
		*status_ptr = SAM_STAT_TASK_ABORTED;
		request->complete_in_target = false;
		*complete_to_host_ptr = isci_perform_error_io_completion;
		break;
	}
}
  1242. /**
  1243. * isci_task_save_for_upper_layer_completion() - This function saves the
  1244. * request for later completion to the upper layer driver.
1245. * @host: This parameter is a pointer to the host on which the request
  1246. * should be queued (either as an error or success).
  1247. * @request: This parameter is the completed request.
  1248. * @response: This parameter is the response code for the completed task.
  1249. * @status: This parameter is the status code for the completed task.
  1250. *
  1251. * none.
  1252. */
static void isci_task_save_for_upper_layer_completion(
	struct isci_host *host,
	struct isci_request *request,
	enum service_response response,
	enum exec_status status,
	enum isci_completion_selection task_notification_selection)
{
	struct sas_task *task = isci_request_access_task(request);

	/* The selection returned by isci_task_set_completion_status()
	 * supersedes the caller-supplied value. */
	task_notification_selection
		= isci_task_set_completion_status(task, response, status,
						  task_notification_selection);

	/* Tasks aborted specifically by a call to the lldd_abort_task
	 * function should not be completed to the host in the regular path.
	 */
	switch (task_notification_selection) {

	case isci_perform_normal_io_completion:

		/* Normal notification (task_done) */
		dev_dbg(&host->pdev->dev,
			"%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
			__func__,
			task,
			task->task_status.resp, response,
			task->task_status.stat, status);
		/* Add to the completed list. */
		list_add(&request->completed_node,
			 &host->requests_to_complete);

		/* Take the request off the device's pending request list. */
		list_del_init(&request->dev_node);
		break;

	case isci_perform_aborted_io_completion:
		/* No notification to libsas because this request is
		 * already in the abort path.
		 */
		dev_warn(&host->pdev->dev,
			 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
			 __func__,
			 task,
			 task->task_status.resp, response,
			 task->task_status.stat, status);

		/* Wake up whatever process was waiting for this
		 * request to complete.
		 */
		WARN_ON(request->io_request_completion == NULL);

		if (request->io_request_completion != NULL) {

			/* Signal whoever is waiting that this
			 * request is complete.
			 */
			complete(request->io_request_completion);
		}
		break;

	case isci_perform_error_io_completion:
		/* Use sas_task_abort */
		dev_warn(&host->pdev->dev,
			 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
			 __func__,
			 task,
			 task->task_status.resp, response,
			 task->task_status.stat, status);
		/* Add to the aborted list. */
		list_add(&request->completed_node,
			 &host->requests_to_errorback);
		break;

	default:
		/* An unrecognized selection is reported and then routed
		 * through the error-to-libsas path as well. */
		dev_warn(&host->pdev->dev,
			 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
			 __func__,
			 task,
			 task->task_status.resp, response,
			 task->task_status.stat, status);

		/* Add to the error to libsas list. */
		list_add(&request->completed_node,
			 &host->requests_to_errorback);
		break;
	}
}
/* Translate a core I/O completion into libsas response/status codes,
 * queue the request on the appropriate host completion list, and retire
 * the request with the core.
 *
 * NOTE(review): request->state_lock is taken once below and released on
 * every branch of the request_status switch.
 */
static void isci_request_io_request_complete(struct isci_host *isci_host,
					     struct isci_request *request,
					     enum sci_io_status completion_status)
{
	struct sas_task *task = isci_request_access_task(request);
	struct ssp_response_iu *resp_iu;
	void *resp_buf;
	unsigned long task_flags;
	struct isci_remote_device *isci_device = request->isci_device;
	enum service_response response = SAS_TASK_UNDELIVERED;
	enum exec_status status = SAS_ABORTED_TASK;
	enum isci_request_status request_status;
	enum isci_completion_selection complete_to_host
		= isci_perform_normal_io_completion;

	dev_dbg(&isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d completion_status = 0x%x\n",
		__func__,
		request,
		task,
		task->data_dir,
		completion_status);

	spin_lock(&request->state_lock);
	request_status = isci_request_get_state(request);

	/* Decode the request status. Note that if the request has been
	 * aborted by a task management function, we don't care
	 * what the status is.
	 */
	switch (request_status) {

	case aborted:
		/* "aborted" indicates that the request was aborted by a task
		 * management function, since once a task management request is
		 * performed by the device, the request only completes because
		 * of the subsequent driver terminate.
		 *
		 * Aborted also means an external thread is explicitly managing
		 * this request, so that we do not complete it up the stack.
		 *
		 * The target is still there (since the TMF was successful).
		 */
		request->complete_in_target = true;
		response = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if ((isci_device->status == isci_stopping)
		    || (isci_device->status == isci_stopped)
		    )
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		complete_to_host = isci_perform_aborted_io_completion;
		/* This was an aborted request. */

		spin_unlock(&request->state_lock);
		break;

	case aborting:
		/* aborting means that the task management function tried and
		 * failed to abort the request. We need to note the request
		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
		 * target as down.
		 *
		 * Aborting also means an external thread is explicitly managing
		 * this request, so that we do not complete it up the stack.
		 */
		request->complete_in_target = true;
		response = SAS_TASK_UNDELIVERED;

		if ((isci_device->status == isci_stopping) ||
		    (isci_device->status == isci_stopped))
			/* The device has been /is being stopped. Note that
			 * we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_PHY_DOWN;

		complete_to_host = isci_perform_aborted_io_completion;

		/* This was an aborted request. */

		spin_unlock(&request->state_lock);
		break;

	case terminating:
		/* This was an terminated request. This happens when
		 * the I/O is being terminated because of an action on
		 * the device (reset, tear down, etc.), and the I/O needs
		 * to be completed up the stack.
		 */
		request->complete_in_target = true;
		response = SAS_TASK_UNDELIVERED;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if ((isci_device->status == isci_stopping) ||
		    (isci_device->status == isci_stopped))
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		complete_to_host = isci_perform_aborted_io_completion;

		/* This was a terminated request. */

		spin_unlock(&request->state_lock);
		break;

	default:

		/* The request is done from an SCU HW perspective. */
		request->status = completed;

		spin_unlock(&request->state_lock);

		/* This is an active request being completed from the core. */
		switch (completion_status) {

		case SCI_IO_FAILURE_RESPONSE_VALID:
			dev_dbg(&isci_host->pdev->dev,
				"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
				__func__,
				request,
				task);

			if (sas_protocol_ata(task->task_proto)) {
				resp_buf = &request->sci.stp.rsp;
				isci_request_process_stp_response(task,
								  resp_buf);
			} else if (SAS_PROTOCOL_SSP == task->task_proto) {

				/* crack the iu response buffer. */
				resp_iu = &request->sci.ssp.rsp;
				isci_request_process_response_iu(task, resp_iu,
								 &isci_host->pdev->dev);

			} else if (SAS_PROTOCOL_SMP == task->task_proto) {

				dev_err(&isci_host->pdev->dev,
					"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
					"SAS_PROTOCOL_SMP protocol\n",
					__func__);

			} else
				dev_err(&isci_host->pdev->dev,
					"%s: unknown protocol\n", __func__);

			/* use the task status set in the task struct by the
			 * isci_request_process_response_iu call.
			 */
			request->complete_in_target = true;
			response = task->task_status.resp;
			status = task->task_status.stat;
			break;

		case SCI_IO_SUCCESS:
		case SCI_IO_SUCCESS_IO_DONE_EARLY:

			response = SAS_TASK_COMPLETE;
			status = SAM_STAT_GOOD;
			request->complete_in_target = true;

			if (task->task_proto == SAS_PROTOCOL_SMP) {
				void *rsp = &request->sci.smp.rsp;

				dev_dbg(&isci_host->pdev->dev,
					"%s: SMP protocol completion\n",
					__func__);

				sg_copy_from_buffer(
					&task->smp_task.smp_resp, 1,
					rsp, sizeof(struct smp_resp));
			} else if (completion_status
				   == SCI_IO_SUCCESS_IO_DONE_EARLY) {

				/* This was an SSP / STP / SATA transfer.
				 * There is a possibility that less data than
				 * the maximum was transferred.
				 */
				u32 transferred_length = sci_req_tx_bytes(&request->sci);

				task->task_status.residual
					= task->total_xfer_len - transferred_length;

				/* If there were residual bytes, call this an
				 * underrun.
				 */
				if (task->task_status.residual != 0)
					status = SAS_DATA_UNDERRUN;

				dev_dbg(&isci_host->pdev->dev,
					"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
					__func__,
					status);

			} else
				dev_dbg(&isci_host->pdev->dev,
					"%s: SCI_IO_SUCCESS\n",
					__func__);

			break;

		case SCI_IO_FAILURE_TERMINATED:
			dev_dbg(&isci_host->pdev->dev,
				"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
				__func__,
				request,
				task);

			/* The request was terminated explicitly. No handling
			 * is needed in the SCSI error handler path.
			 */
			request->complete_in_target = true;
			response = SAS_TASK_UNDELIVERED;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if ((isci_device->status == isci_stopping) ||
			    (isci_device->status == isci_stopped))
				status = SAS_DEVICE_UNKNOWN;
			else
				status = SAS_ABORTED_TASK;

			complete_to_host = isci_perform_normal_io_completion;
			break;

		case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:

			isci_request_handle_controller_specific_errors(
				isci_device, request, task, &response, &status,
				&complete_to_host);

			break;

		case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
			/* This is a special case, in that the I/O completion
			 * is telling us that the device needs a reset.
			 * In order for the device reset condition to be
			 * noticed, the I/O has to be handled in the error
			 * handler. Set the reset flag and cause the
			 * SCSI error thread to be scheduled.
			 */
			spin_lock_irqsave(&task->task_state_lock, task_flags);
			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
			spin_unlock_irqrestore(&task->task_state_lock, task_flags);

			/* Fail the I/O. */
			response = SAS_TASK_UNDELIVERED;
			status = SAM_STAT_TASK_ABORTED;

			complete_to_host = isci_perform_error_io_completion;
			request->complete_in_target = false;
			break;

		default:
			/* Catch any otherwise unhandled error codes here. */
			dev_warn(&isci_host->pdev->dev,
				 "%s: invalid completion code: 0x%x - "
				 "isci_request = %p\n",
				 __func__, completion_status, request);

			response = SAS_TASK_UNDELIVERED;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if ((isci_device->status == isci_stopping) ||
			    (isci_device->status == isci_stopped))
				status = SAS_DEVICE_UNKNOWN;
			else
				status = SAS_ABORTED_TASK;

			complete_to_host = isci_perform_error_io_completion;
			request->complete_in_target = false;
			break;
		}
		break;
	}

	isci_request_unmap_sgl(request, isci_host->pdev);

	/* Put the completed request on the correct list */
	isci_task_save_for_upper_layer_completion(isci_host, request, response,
						  status, complete_to_host
						  );

	/* complete the io request to the core. */
	scic_controller_complete_io(&isci_host->sci,
				    &isci_device->sci,
				    &request->sci);
	/* set terminated handle so it cannot be completed or
	 * terminated again, and to cause any calls into abort
	 * task to recognize the already completed case.
	 */
	request->terminated = true;

	isci_host_can_dequeue(isci_host, 1);
}
  1583. /**
  1584. * scic_sds_request_initial_state_enter() -
  1585. * @object: This parameter specifies the base object for which the state
  1586. * transition is occurring.
  1587. *
  1588. * This method implements the actions taken when entering the
  1589. * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
  1590. * base request is constructed. Entry into the initial state sets all handlers
  1591. * for the io request object to their default handlers. none
  1592. */
  1593. static void scic_sds_request_initial_state_enter(void *object)
  1594. {
  1595. struct scic_sds_request *sci_req = object;
  1596. SET_STATE_HANDLER(
  1597. sci_req,
  1598. scic_sds_request_state_handler_table,
  1599. SCI_BASE_REQUEST_STATE_INITIAL
  1600. );
  1601. }
  1602. /**
  1603. * scic_sds_request_constructed_state_enter() -
  1604. * @object: The io request object that is to enter the constructed state.
  1605. *
  1606. * This method implements the actions taken when entering the
  1607. * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
  1608. * for the the constructed state. none
  1609. */
  1610. static void scic_sds_request_constructed_state_enter(void *object)
  1611. {
  1612. struct scic_sds_request *sci_req = object;
  1613. SET_STATE_HANDLER(
  1614. sci_req,
  1615. scic_sds_request_state_handler_table,
  1616. SCI_BASE_REQUEST_STATE_CONSTRUCTED
  1617. );
  1618. }
  1619. /**
  1620. * scic_sds_request_started_state_enter() -
  1621. * @object: This parameter specifies the base object for which the state
  1622. * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
  1623. *
  1624. * This method implements the actions taken when entering the
  1625. * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
  1626. * SCSI Task request we must enter the started substate machine. none
  1627. */
  1628. static void scic_sds_request_started_state_enter(void *object)
  1629. {
  1630. struct scic_sds_request *sci_req = object;
  1631. SET_STATE_HANDLER(
  1632. sci_req,
  1633. scic_sds_request_state_handler_table,
  1634. SCI_BASE_REQUEST_STATE_STARTED
  1635. );
  1636. /*
  1637. * Most of the request state machines have a started substate machine so
  1638. * start its execution on the entry to the started state. */
  1639. if (sci_req->has_started_substate_machine == true)
  1640. sci_base_state_machine_start(&sci_req->started_substate_machine);
  1641. }
  1642. /**
  1643. * scic_sds_request_started_state_exit() -
  1644. * @object: This parameter specifies the base object for which the state
  1645. * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
  1646. * object.
  1647. *
  1648. * This method implements the actions taken when exiting the
  1649. * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
  1650. * to stop the started substate machine. none
  1651. */
  1652. static void scic_sds_request_started_state_exit(void *object)
  1653. {
  1654. struct scic_sds_request *sci_req = object;
  1655. if (sci_req->has_started_substate_machine == true)
  1656. sci_base_state_machine_stop(&sci_req->started_substate_machine);
  1657. }
  1658. /**
  1659. * scic_sds_request_completed_state_enter() -
  1660. * @object: This parameter specifies the base object for which the state
  1661. * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
  1662. * object.
  1663. *
  1664. * This method implements the actions taken when entering the
  1665. * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
  1666. * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
  1667. * completion status and convert it to an enum sci_status to return in the
  1668. * completion callback function. none
  1669. */
  1670. static void scic_sds_request_completed_state_enter(void *object)
  1671. {
  1672. struct scic_sds_request *sci_req = object;
  1673. struct scic_sds_controller *scic =
  1674. scic_sds_request_get_controller(sci_req);
  1675. struct isci_host *ihost = scic_to_ihost(scic);
  1676. struct isci_request *ireq = sci_req_to_ireq(sci_req);
  1677. SET_STATE_HANDLER(sci_req,
  1678. scic_sds_request_state_handler_table,
  1679. SCI_BASE_REQUEST_STATE_COMPLETED);
  1680. /* Tell the SCI_USER that the IO request is complete */
  1681. if (sci_req->is_task_management_request == false)
  1682. isci_request_io_request_complete(ihost, ireq,
  1683. sci_req->sci_status);
  1684. else
  1685. isci_task_request_complete(ihost, ireq, sci_req->sci_status);
  1686. }
  1687. /**
  1688. * scic_sds_request_aborting_state_enter() -
  1689. * @object: This parameter specifies the base object for which the state
  1690. * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
  1691. * object.
  1692. *
  1693. * This method implements the actions taken when entering the
  1694. * SCI_BASE_REQUEST_STATE_ABORTING state. none
  1695. */
  1696. static void scic_sds_request_aborting_state_enter(void *object)
  1697. {
  1698. struct scic_sds_request *sci_req = object;
  1699. /* Setting the abort bit in the Task Context is required by the silicon. */
  1700. sci_req->task_context_buffer->abort = 1;
  1701. SET_STATE_HANDLER(
  1702. sci_req,
  1703. scic_sds_request_state_handler_table,
  1704. SCI_BASE_REQUEST_STATE_ABORTING
  1705. );
  1706. }
  1707. /**
  1708. * scic_sds_request_final_state_enter() -
  1709. * @object: This parameter specifies the base object for which the state
  1710. * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
  1711. *
  1712. * This method implements the actions taken when entering the
  1713. * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
  1714. * state handlers in place. none
  1715. */
  1716. static void scic_sds_request_final_state_enter(void *object)
  1717. {
  1718. struct scic_sds_request *sci_req = object;
  1719. SET_STATE_HANDLER(
  1720. sci_req,
  1721. scic_sds_request_state_handler_table,
  1722. SCI_BASE_REQUEST_STATE_FINAL
  1723. );
  1724. }
/* Entry/exit actions for each base request state, indexed by
 * SCI_BASE_REQUEST_STATE_*.  Only STARTED has an exit action (needed to
 * stop a running started substate machine).
 */
static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
		.enter_state = scic_sds_request_initial_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.enter_state = scic_sds_request_constructed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
		.exit_state  = scic_sds_request_started_state_exit
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
		.enter_state = scic_sds_request_final_state_enter,
	},
};
  1746. static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
  1747. struct scic_sds_remote_device *sci_dev,
  1748. u16 io_tag, struct scic_sds_request *sci_req)
  1749. {
  1750. sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
  1751. scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
  1752. sci_base_state_machine_start(&sci_req->state_machine);
  1753. sci_req->io_tag = io_tag;
  1754. sci_req->owning_controller = scic;
  1755. sci_req->target_device = sci_dev;
  1756. sci_req->has_started_substate_machine = false;
  1757. sci_req->protocol = SCIC_NO_PROTOCOL;
  1758. sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
  1759. sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);
  1760. sci_req->sci_status = SCI_SUCCESS;
  1761. sci_req->scu_status = 0;
  1762. sci_req->post_context = 0xFFFFFFFF;
  1763. sci_req->is_task_management_request = false;
  1764. if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
  1765. sci_req->was_tag_assigned_by_user = false;
  1766. sci_req->task_context_buffer = NULL;
  1767. } else {
  1768. sci_req->was_tag_assigned_by_user = true;
  1769. sci_req->task_context_buffer =
  1770. scic_sds_controller_get_task_context_buffer(scic, io_tag);
  1771. }
  1772. }
  1773. static enum sci_status
  1774. scic_io_request_construct(struct scic_sds_controller *scic,
  1775. struct scic_sds_remote_device *sci_dev,
  1776. u16 io_tag, struct scic_sds_request *sci_req)
  1777. {
  1778. struct domain_device *dev = sci_dev_to_domain(sci_dev);
  1779. enum sci_status status = SCI_SUCCESS;
  1780. /* Build the common part of the request */
  1781. scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
  1782. if (sci_dev->rnc.remote_node_index ==
  1783. SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
  1784. return SCI_FAILURE_INVALID_REMOTE_DEVICE;
  1785. if (dev->dev_type == SAS_END_DEV)
  1786. scic_sds_ssp_io_request_assign_buffers(sci_req);
  1787. else if ((dev->dev_type == SATA_DEV) ||
  1788. (dev->tproto & SAS_PROTOCOL_STP)) {
  1789. scic_sds_stp_request_assign_buffers(sci_req);
  1790. memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
  1791. } else if (dev_is_expander(dev)) {
  1792. scic_sds_smp_request_assign_buffers(sci_req);
  1793. memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
  1794. } else
  1795. status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
  1796. if (status == SCI_SUCCESS) {
  1797. memset(sci_req->task_context_buffer, 0,
  1798. offsetof(struct scu_task_context, sgl_pair_ab));
  1799. }
  1800. return status;
  1801. }
  1802. enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
  1803. struct scic_sds_remote_device *sci_dev,
  1804. u16 io_tag, struct scic_sds_request *sci_req)
  1805. {
  1806. struct domain_device *dev = sci_dev_to_domain(sci_dev);
  1807. enum sci_status status = SCI_SUCCESS;
  1808. /* Build the common part of the request */
  1809. scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);
  1810. if (dev->dev_type == SAS_END_DEV) {
  1811. scic_sds_ssp_task_request_assign_buffers(sci_req);
  1812. sci_req->has_started_substate_machine = true;
  1813. /* Construct the started sub-state machine. */
  1814. sci_base_state_machine_construct(
  1815. &sci_req->started_substate_machine,
  1816. sci_req,
  1817. scic_sds_io_request_started_task_mgmt_substate_table,
  1818. SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
  1819. );
  1820. } else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
  1821. scic_sds_stp_request_assign_buffers(sci_req);
  1822. else
  1823. status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
  1824. if (status == SCI_SUCCESS) {
  1825. sci_req->is_task_management_request = true;
  1826. memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
  1827. }
  1828. return status;
  1829. }
  1830. static enum sci_status isci_request_ssp_request_construct(
  1831. struct isci_request *request)
  1832. {
  1833. enum sci_status status;
  1834. dev_dbg(&request->isci_host->pdev->dev,
  1835. "%s: request = %p\n",
  1836. __func__,
  1837. request);
  1838. status = scic_io_request_construct_basic_ssp(&request->sci);
  1839. return status;
  1840. }
  1841. static enum sci_status isci_request_stp_request_construct(
  1842. struct isci_request *request)
  1843. {
  1844. struct sas_task *task = isci_request_access_task(request);
  1845. enum sci_status status;
  1846. struct host_to_dev_fis *register_fis;
  1847. dev_dbg(&request->isci_host->pdev->dev,
  1848. "%s: request = %p\n",
  1849. __func__,
  1850. request);
  1851. /* Get the host_to_dev_fis from the core and copy
  1852. * the fis from the task into it.
  1853. */
  1854. register_fis = isci_sata_task_to_fis_copy(task);
  1855. status = scic_io_request_construct_basic_sata(&request->sci);
  1856. /* Set the ncq tag in the fis, from the queue
  1857. * command in the task.
  1858. */
  1859. if (isci_sata_is_task_ncq(task)) {
  1860. isci_sata_set_ncq_tag(
  1861. register_fis,
  1862. task
  1863. );
  1864. }
  1865. return status;
  1866. }
  1867. /*
  1868. * isci_smp_request_build() - This function builds the smp request.
  1869. * @ireq: This parameter points to the isci_request allocated in the
  1870. * request construct function.
  1871. *
  1872. * SCI_SUCCESS on successfull completion, or specific failure code.
  1873. */
  1874. static enum sci_status isci_smp_request_build(struct isci_request *ireq)
  1875. {
  1876. enum sci_status status = SCI_FAILURE;
  1877. struct sas_task *task = isci_request_access_task(ireq);
  1878. struct scic_sds_request *sci_req = &ireq->sci;
  1879. dev_dbg(&ireq->isci_host->pdev->dev,
  1880. "%s: request = %p\n", __func__, ireq);
  1881. dev_dbg(&ireq->isci_host->pdev->dev,
  1882. "%s: smp_req len = %d\n",
  1883. __func__,
  1884. task->smp_task.smp_req.length);
  1885. /* copy the smp_command to the address; */
  1886. sg_copy_to_buffer(&task->smp_task.smp_req, 1,
  1887. &sci_req->smp.cmd,
  1888. sizeof(struct smp_req));
  1889. status = scic_io_request_construct_smp(sci_req);
  1890. if (status != SCI_SUCCESS)
  1891. dev_warn(&ireq->isci_host->pdev->dev,
  1892. "%s: failed with status = %d\n",
  1893. __func__,
  1894. status);
  1895. return status;
  1896. }
  1897. /**
  1898. * isci_io_request_build() - This function builds the io request object.
  1899. * @isci_host: This parameter specifies the ISCI host object
  1900. * @request: This parameter points to the isci_request object allocated in the
  1901. * request construct function.
  1902. * @sci_device: This parameter is the handle for the sci core's remote device
  1903. * object that is the destination for this request.
  1904. *
  1905. * SCI_SUCCESS on successfull completion, or specific failure code.
  1906. */
  1907. static enum sci_status isci_io_request_build(
  1908. struct isci_host *isci_host,
  1909. struct isci_request *request,
  1910. struct isci_remote_device *isci_device)
  1911. {
  1912. enum sci_status status = SCI_SUCCESS;
  1913. struct sas_task *task = isci_request_access_task(request);
  1914. struct scic_sds_remote_device *sci_device = &isci_device->sci;
  1915. dev_dbg(&isci_host->pdev->dev,
  1916. "%s: isci_device = 0x%p; request = %p, "
  1917. "num_scatter = %d\n",
  1918. __func__,
  1919. isci_device,
  1920. request,
  1921. task->num_scatter);
  1922. /* map the sgl addresses, if present.
  1923. * libata does the mapping for sata devices
  1924. * before we get the request.
  1925. */
  1926. if (task->num_scatter &&
  1927. !sas_protocol_ata(task->task_proto) &&
  1928. !(SAS_PROTOCOL_SMP & task->task_proto)) {
  1929. request->num_sg_entries = dma_map_sg(
  1930. &isci_host->pdev->dev,
  1931. task->scatter,
  1932. task->num_scatter,
  1933. task->data_dir
  1934. );
  1935. if (request->num_sg_entries == 0)
  1936. return SCI_FAILURE_INSUFFICIENT_RESOURCES;
  1937. }
  1938. /* build the common request object. For now,
  1939. * we will let the core allocate the IO tag.
  1940. */
  1941. status = scic_io_request_construct(&isci_host->sci, sci_device,
  1942. SCI_CONTROLLER_INVALID_IO_TAG,
  1943. &request->sci);
  1944. if (status != SCI_SUCCESS) {
  1945. dev_warn(&isci_host->pdev->dev,
  1946. "%s: failed request construct\n",
  1947. __func__);
  1948. return SCI_FAILURE;
  1949. }
  1950. switch (task->task_proto) {
  1951. case SAS_PROTOCOL_SMP:
  1952. status = isci_smp_request_build(request);
  1953. break;
  1954. case SAS_PROTOCOL_SSP:
  1955. status = isci_request_ssp_request_construct(request);
  1956. break;
  1957. case SAS_PROTOCOL_SATA:
  1958. case SAS_PROTOCOL_STP:
  1959. case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
  1960. status = isci_request_stp_request_construct(request);
  1961. break;
  1962. default:
  1963. dev_warn(&isci_host->pdev->dev,
  1964. "%s: unknown protocol\n", __func__);
  1965. return SCI_FAILURE;
  1966. }
  1967. return SCI_SUCCESS;
  1968. }
  1969. /**
  1970. * isci_request_alloc_core() - This function gets the request object from the
  1971. * isci_host dma cache.
  1972. * @isci_host: This parameter specifies the ISCI host object
  1973. * @isci_request: This parameter will contain the pointer to the new
  1974. * isci_request object.
  1975. * @isci_device: This parameter is the pointer to the isci remote device object
  1976. * that is the destination for this request.
  1977. * @gfp_flags: This parameter specifies the os allocation flags.
  1978. *
  1979. * SCI_SUCCESS on successfull completion, or specific failure code.
  1980. */
  1981. static int isci_request_alloc_core(
  1982. struct isci_host *isci_host,
  1983. struct isci_request **isci_request,
  1984. struct isci_remote_device *isci_device,
  1985. gfp_t gfp_flags)
  1986. {
  1987. int ret = 0;
  1988. dma_addr_t handle;
  1989. struct isci_request *request;
  1990. /* get pointer to dma memory. This actually points
  1991. * to both the isci_remote_device object and the
  1992. * sci object. The isci object is at the beginning
  1993. * of the memory allocated here.
  1994. */
  1995. request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
  1996. if (!request) {
  1997. dev_warn(&isci_host->pdev->dev,
  1998. "%s: dma_pool_alloc returned NULL\n", __func__);
  1999. return -ENOMEM;
  2000. }
  2001. /* initialize the request object. */
  2002. spin_lock_init(&request->state_lock);
  2003. request->request_daddr = handle;
  2004. request->isci_host = isci_host;
  2005. request->isci_device = isci_device;
  2006. request->io_request_completion = NULL;
  2007. request->terminated = false;
  2008. request->num_sg_entries = 0;
  2009. request->complete_in_target = false;
  2010. INIT_LIST_HEAD(&request->completed_node);
  2011. INIT_LIST_HEAD(&request->dev_node);
  2012. *isci_request = request;
  2013. isci_request_change_state(request, allocated);
  2014. return ret;
  2015. }
  2016. static int isci_request_alloc_io(
  2017. struct isci_host *isci_host,
  2018. struct sas_task *task,
  2019. struct isci_request **isci_request,
  2020. struct isci_remote_device *isci_device,
  2021. gfp_t gfp_flags)
  2022. {
  2023. int retval = isci_request_alloc_core(isci_host, isci_request,
  2024. isci_device, gfp_flags);
  2025. if (!retval) {
  2026. (*isci_request)->ttype_ptr.io_task_ptr = task;
  2027. (*isci_request)->ttype = io_task;
  2028. task->lldd_task = *isci_request;
  2029. }
  2030. return retval;
  2031. }
  2032. /**
  2033. * isci_request_alloc_tmf() - This function gets the request object from the
  2034. * isci_host dma cache and initializes the relevant fields as a sas_task.
  2035. * @isci_host: This parameter specifies the ISCI host object
  2036. * @sas_task: This parameter is the task struct from the upper layer driver.
  2037. * @isci_request: This parameter will contain the pointer to the new
  2038. * isci_request object.
  2039. * @isci_device: This parameter is the pointer to the isci remote device object
  2040. * that is the destination for this request.
  2041. * @gfp_flags: This parameter specifies the os allocation flags.
  2042. *
  2043. * SCI_SUCCESS on successfull completion, or specific failure code.
  2044. */
  2045. int isci_request_alloc_tmf(
  2046. struct isci_host *isci_host,
  2047. struct isci_tmf *isci_tmf,
  2048. struct isci_request **isci_request,
  2049. struct isci_remote_device *isci_device,
  2050. gfp_t gfp_flags)
  2051. {
  2052. int retval = isci_request_alloc_core(isci_host, isci_request,
  2053. isci_device, gfp_flags);
  2054. if (!retval) {
  2055. (*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
  2056. (*isci_request)->ttype = tmf_task;
  2057. }
  2058. return retval;
  2059. }
  2060. /**
  2061. * isci_request_execute() - This function allocates the isci_request object,
  2062. * all fills in some common fields.
  2063. * @isci_host: This parameter specifies the ISCI host object
  2064. * @sas_task: This parameter is the task struct from the upper layer driver.
  2065. * @isci_request: This parameter will contain the pointer to the new
  2066. * isci_request object.
  2067. * @gfp_flags: This parameter specifies the os allocation flags.
  2068. *
  2069. * SCI_SUCCESS on successfull completion, or specific failure code.
  2070. */
  2071. int isci_request_execute(
  2072. struct isci_host *isci_host,
  2073. struct sas_task *task,
  2074. struct isci_request **isci_request,
  2075. gfp_t gfp_flags)
  2076. {
  2077. int ret = 0;
  2078. struct scic_sds_remote_device *sci_device;
  2079. enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
  2080. struct isci_remote_device *isci_device;
  2081. struct isci_request *request;
  2082. unsigned long flags;
  2083. isci_device = task->dev->lldd_dev;
  2084. sci_device = &isci_device->sci;
  2085. /* do common allocation and init of request object. */
  2086. ret = isci_request_alloc_io(
  2087. isci_host,
  2088. task,
  2089. &request,
  2090. isci_device,
  2091. gfp_flags
  2092. );
  2093. if (ret)
  2094. goto out;
  2095. status = isci_io_request_build(isci_host, request, isci_device);
  2096. if (status != SCI_SUCCESS) {
  2097. dev_warn(&isci_host->pdev->dev,
  2098. "%s: request_construct failed - status = 0x%x\n",
  2099. __func__,
  2100. status);
  2101. goto out;
  2102. }
  2103. spin_lock_irqsave(&isci_host->scic_lock, flags);
  2104. /* send the request, let the core assign the IO TAG. */
  2105. status = scic_controller_start_io(&isci_host->sci, sci_device,
  2106. &request->sci,
  2107. SCI_CONTROLLER_INVALID_IO_TAG);
  2108. if (status != SCI_SUCCESS &&
  2109. status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
  2110. dev_warn(&isci_host->pdev->dev,
  2111. "%s: failed request start (0x%x)\n",
  2112. __func__, status);
  2113. spin_unlock_irqrestore(&isci_host->scic_lock, flags);
  2114. goto out;
  2115. }
  2116. /* Either I/O started OK, or the core has signaled that
  2117. * the device needs a target reset.
  2118. *
  2119. * In either case, hold onto the I/O for later.
  2120. *
  2121. * Update it's status and add it to the list in the
  2122. * remote device object.
  2123. */
  2124. isci_request_change_state(request, started);
  2125. list_add(&request->dev_node, &isci_device->reqs_in_process);
  2126. if (status == SCI_SUCCESS) {
  2127. /* Save the tag for possible task mgmt later. */
  2128. request->io_tag = request->sci.io_tag;
  2129. } else {
  2130. /* The request did not really start in the
  2131. * hardware, so clear the request handle
  2132. * here so no terminations will be done.
  2133. */
  2134. request->terminated = true;
  2135. }
  2136. spin_unlock_irqrestore(&isci_host->scic_lock, flags);
  2137. if (status ==
  2138. SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
  2139. /* Signal libsas that we need the SCSI error
  2140. * handler thread to work on this I/O and that
  2141. * we want a device reset.
  2142. */
  2143. spin_lock_irqsave(&task->task_state_lock, flags);
  2144. task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
  2145. spin_unlock_irqrestore(&task->task_state_lock, flags);
  2146. /* Cause this task to be scheduled in the SCSI error
  2147. * handler thread.
  2148. */
  2149. isci_execpath_callback(isci_host, task,
  2150. sas_task_abort);
  2151. /* Change the status, since we are holding
  2152. * the I/O until it is managed by the SCSI
  2153. * error handler.
  2154. */
  2155. status = SCI_SUCCESS;
  2156. }
  2157. out:
  2158. if (status != SCI_SUCCESS) {
  2159. /* release dma memory on failure. */
  2160. isci_request_free(isci_host, request);
  2161. request = NULL;
  2162. ret = SCI_FAILURE;
  2163. }
  2164. *isci_request = request;
  2165. return ret;
  2166. }