remote_device.c
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <scsi/sas.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"
/**
 * isci_remote_device_change_state() - This function sets the state of the
 *    remote_device object.
 * @isci_device: This parameter points to the isci_remote_device object.
 * @status: This parameter is the new state, as an isci_status enum.
 */
void isci_remote_device_change_state(
	struct isci_remote_device *isci_device,
	enum isci_status status)
{
	unsigned long flags;

	spin_lock_irqsave(&isci_device->state_lock, flags);
	isci_device->status = status;
	spin_unlock_irqrestore(&isci_device->state_lock, flags);
}

/**
 * isci_remote_device_not_ready() - This function is called by the scic when
 *    the remote device is not ready. We mark the isci device as ready (not
 *    "ready_for_io") and signal the waiting process.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 * @reason: This parameter specifies the reason the device is not ready.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev, u32 reason)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED)
		isci_remote_device_change_state(idev, isci_stopping);
	else
		/* device ready is actually a "not ready for io" state. */
		isci_remote_device_change_state(idev, isci_ready);
}

/**
 * isci_remote_device_ready() - This function is called by the scic when the
 *    remote device is ready. We mark the isci device as ready and signal the
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	isci_remote_device_change_state(idev, isci_ready_for_io);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

/* called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct scic_sds_remote_device *sci_dev = _dev;

	BUG_ON(sci_dev->started_request_count != 0);
	sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED);
}
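
/* Terminate every started request in the controller's io_request_table that
 * targets this device, returning a failure status if any terminate call
 * fails.
 */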
static enum sci_status scic_sds_remote_device_terminate_requests(struct scic_sds_remote_device *sci_dev)
{
	struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
	u32 i, request_count = sci_dev->started_request_count;
	enum sci_status status = SCI_SUCCESS;

	for (i = 0; i < SCI_MAX_IO_REQUESTS && i < request_count; i++) {
		struct scic_sds_request *sci_req;
		enum sci_status s;

		sci_req = scic->io_request_table[i];
		if (!sci_req || sci_req->target_device != sci_dev)
			continue;
		s = scic_controller_terminate_request(scic, sci_dev, sci_req);
		if (s != SCI_SUCCESS)
			status = s;
	}

	return status;
}
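
/* Request that the remote device stop: the remote node context is destructed
 * once no requests are outstanding, otherwise the outstanding requests are
 * terminated first and the device waits in SCI_DEV_STOPPING.
 */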
enum sci_status scic_remote_device_stop(struct scic_sds_remote_device *sci_dev,
					u32 timeout)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(sci_dev->started_request_count != 0);
		scic_sds_remote_node_context_destruct(&sci_dev->rnc,
						      rnc_destruct_done, sci_dev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (sci_dev->started_request_count == 0) {
			scic_sds_remote_node_context_destruct(&sci_dev->rnc,
							      rnc_destruct_done, sci_dev);
			return SCI_SUCCESS;
		} else
			return scic_sds_remote_device_terminate_requests(sci_dev);
		break;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return scic_sds_remote_device_terminate_requests(sci_dev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}
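
/* Begin a remote device reset. This is only legal while the device is in a
 * ready state or one of its STP substates; the device then transitions to
 * SCI_DEV_RESETTING until scic_remote_device_reset_complete() is called.
 */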
enum sci_status scic_remote_device_reset(struct scic_sds_remote_device *sci_dev)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}
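
/* Signal that the reset started by scic_remote_device_reset() has finished;
 * the device returns to SCI_DEV_READY.
 */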
enum sci_status scic_remote_device_reset_complete(struct scic_sds_remote_device *sci_dev)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

enum sci_status scic_sds_remote_device_suspend(struct scic_sds_remote_device *sci_dev,
					       u32 suspend_type)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;

	if (state != SCI_STP_DEV_CMD) {
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	return scic_sds_remote_node_context_suspend(&sci_dev->rnc,
						    suspend_type, NULL, NULL);
}
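
/* Route an unsolicited frame received for this device: SSP frames are matched
 * to the owning request by tag, SATA set-device-bits/D2H error FISes move the
 * device to the NCQ error substate, and frames that cannot be dispatched are
 * released back to the controller.
 */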
enum sci_status scic_sds_remote_device_frame_handler(struct scic_sds_remote_device *sci_dev,
						     u32 frame_index)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;
	struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		/* Return the frame back to the controller */
		scic_sds_controller_release_frame(scic, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct scic_sds_request *sci_req;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
								       frame_index,
								       &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		sci_req = scic_request_by_tag(scic, be16_to_cpu(hdr.tag));
		if (sci_req && sci_req->target_device == sci_dev) {
			/* The IO request is now in charge of releasing the frame */
			status = scic_sds_io_request_frame_handler(sci_req, frame_index);
		} else {
			/* We could not map this tag to a valid IO request.
			 * Just toss the frame and continue.
			 */
			scic_sds_controller_release_frame(scic, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
								       frame_index,
								       (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return D2H FIS when an NCQ error is detected.
			 * Treat this like an SDB error FIS ready reason.
			 */
			sci_dev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&sci_dev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		scic_sds_controller_release_frame(scic, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state. All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = scic_sds_io_request_frame_handler(sci_dev->working_request, frame_index);
		break;
	}

	return status;
}
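
/* Return true if the device state machine is in the ready state or one of
 * its STP/SMP substates.
 */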
static bool is_remote_device_ready(struct scic_sds_remote_device *sci_dev)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}
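
/* Handle an SCU event for this device: RNC suspension/resume events are
 * forwarded to the remote node context, an I_T nexus timeout suspends the
 * RNC, and anything else is reported as unexpected.
 */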
enum sci_status scic_sds_remote_device_event_handler(struct scic_sds_remote_device *sci_dev,
						     u32 event_code)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = scic_sds_remote_node_context_event_handler(&sci_dev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			scic_sds_remote_node_context_suspend(&sci_dev->rnc,
							     SCI_SOFTWARE_SUSPENSION,
							     NULL, NULL);

			dev_dbg(scirdev_to_dev(sci_dev),
				"%s: device: %p event code: %x: %s\n",
				__func__, sci_dev, event_code,
				is_remote_device_ready(sci_dev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");
			break;
		}
		/* Else, fall through and treat as unhandled... */
	default:
		dev_dbg(scirdev_to_dev(sci_dev),
			"%s: device: %p event code: %x: %s\n",
			__func__, sci_dev, event_code,
			is_remote_device_ready(sci_dev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	if (state == SCI_STP_DEV_IDLE) {
		/* We pick up suspension events to handle specifically in this
		 * state. We resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = scic_sds_remote_node_context_resume(&sci_dev->rnc, NULL, NULL);
	}

	return status;
}
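
/* Account for a request that was just started on this device: on success the
 * started request count is incremented, on failure the port-level start is
 * undone.
 */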
static void scic_sds_remote_device_start_request(struct scic_sds_remote_device *sci_dev,
						 struct scic_sds_request *sci_req,
						 enum sci_status status)
{
	struct scic_sds_port *sci_port = sci_dev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		scic_sds_port_complete_io(sci_port, sci_dev, sci_req);
	else
		scic_sds_remote_device_increment_request_count(sci_dev);
}
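
/* Start an i/o request on this device. The request is started on the port,
 * then on the remote node context, then on the request itself; STP and SMP
 * devices additionally transition to the appropriate command/NCQ substate.
 */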
enum sci_status scic_sds_remote_device_start_io(struct scic_sds_controller *scic,
						struct scic_sds_remote_device *sci_dev,
						struct scic_sds_request *sci_req)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;
	struct scic_sds_port *sci_port = sci_dev->owning_port;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* attempt to start an io request for this device object. The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object then
		 * increment its own request count.
		 */
		status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			return status;

		status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
		if (status != SCI_SUCCESS)
			break;

		status = scic_sds_request_start(sci_req);
		break;
	case SCI_STP_DEV_IDLE: {
		/* handle the start io operation for a sata device that is in
		 * the command idle state.
		 * - Evaluate the type of IO request to be started
		 * - If it is an NCQ request change to the NCQ substate
		 * - If it is any other command change to the CMD substate
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum scic_sds_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			return status;

		status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
		if (status != SCI_SUCCESS)
			break;

		status = scic_sds_request_start(sci_req);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			sci_dev->working_request = sci_req;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
			if (status != SCI_SUCCESS)
				return status;

			status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
			if (status != SCI_SUCCESS)
				break;

			status = scic_sds_request_start(sci_req);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			return status;

		status = scic_sds_remote_node_context_start_io(&sci_dev->rnc, sci_req);
		if (status != SCI_SUCCESS)
			break;

		status = scic_sds_request_start(sci_req);
		if (status != SCI_SUCCESS)
			break;

		sci_dev->working_request = sci_req;
		sci_change_state(&sci_dev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* device is already handling a command; it cannot accept new
		 * commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	scic_sds_remote_device_start_request(sci_dev, sci_req, status);
	return status;
}
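
/* Completion work shared by all ready substates: complete the request, tell
 * the owning port, and decrement the device's started request count.
 */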
static enum sci_status common_complete_io(struct scic_sds_port *sci_port,
					  struct scic_sds_remote_device *sci_dev,
					  struct scic_sds_request *sci_req)
{
	enum sci_status status;

	status = scic_sds_request_complete(sci_req);
	if (status != SCI_SUCCESS)
		return status;

	status = scic_sds_port_complete_io(sci_port, sci_dev, sci_req);
	if (status != SCI_SUCCESS)
		return status;

	scic_sds_remote_device_decrement_request_count(sci_dev);
	return status;
}
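
/* Complete an i/o request on this device and, for the STP/SMP substates,
 * transition back to idle (or to AWAIT_RESET when the request failed in a way
 * that requires a device reset).
 */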
enum sci_status scic_sds_remote_device_complete_io(struct scic_sds_controller *scic,
						   struct scic_sds_remote_device *sci_dev,
						   struct scic_sds_request *sci_req)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;
	struct scic_sds_port *sci_port = sci_dev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(sci_port, sci_dev, sci_req);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
		status = common_complete_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			break;

		if (sci_req->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error and the device needs a
			 * LUN reset. Force the state machine to the AWAIT_RESET state
			 * so the remaining IOs reach the RNC state handler and are
			 * completed with a status of "DEVICE_RESET_REQUIRED" instead
			 * of "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (scic_sds_remote_device_get_request_count(sci_dev) == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			break;

		if (scic_sds_remote_device_get_request_count(sci_dev) == 0)
			scic_sds_remote_node_context_destruct(&sci_dev->rnc,
							      rnc_destruct_done,
							      sci_dev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(sci_dev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, sci_port,
			sci_dev, sci_req, status);

	return status;
}

static void scic_sds_remote_device_continue_request(void *dev)
{
	struct scic_sds_remote_device *sci_dev = dev;

	/* we need to check if this request is still valid to continue. */
	if (sci_dev->working_request)
		scic_controller_continue_io(sci_dev->working_request);
}
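
/* Start a task management request on this device. For STP devices the remote
 * node context is suspended and resumed so the TCi to NCQ mapping is cleaned
 * up before the task frame is posted.
 */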
enum sci_status scic_sds_remote_device_start_task(struct scic_sds_controller *scic,
						  struct scic_sds_remote_device *sci_dev,
						  struct scic_sds_request *sci_req)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;
	struct scic_sds_port *sci_port = sci_dev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			return status;

		status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req);
		if (status != SCI_SUCCESS)
			goto out;

		status = scic_sds_request_start(sci_req);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		sci_dev->working_request = sci_req;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must cleanup the TCi to NCQ mapping
		 * table. The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC. In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		scic_sds_remote_node_context_suspend(&sci_dev->rnc,
						     SCI_SOFTWARE_SUSPENSION, NULL, NULL);
		scic_sds_remote_node_context_resume(&sci_dev->rnc,
						    scic_sds_remote_device_continue_request,
						    sci_dev);

	out:
		scic_sds_remote_device_start_request(sci_dev, sci_req, status);
		/* We need to let the controller start request handler know that
		 * it can't post TC yet. We will provide a callback function to
		 * post TC when RNC gets resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = scic_sds_port_start_io(sci_port, sci_dev, sci_req);
		if (status != SCI_SUCCESS)
			return status;

		status = scic_sds_remote_node_context_start_task(&sci_dev->rnc, sci_req);
		if (status != SCI_SUCCESS)
			break;

		status = scic_sds_request_start(sci_req);
		break;
	}

	scic_sds_remote_device_start_request(sci_dev, sci_req, status);
	return status;
}
/**
 * scic_sds_remote_device_post_request() - post a request code to the SCU
 * @sci_dev: the remote device the request is for
 * @request: the request code to post
 *
 * This method takes the request, builds an appropriate SCU context for the
 * request, and then requests the controller to post it.
 */
void scic_sds_remote_device_post_request(
	struct scic_sds_remote_device *sci_dev,
	u32 request)
{
	u32 context;

	context = scic_sds_remote_device_build_command_context(sci_dev, request);

	scic_sds_controller_post_request(
		scic_sds_remote_device_get_controller(sci_dev),
		context);
}
/* called once the remote node context has transitioned to a
 * ready state. This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct scic_sds_remote_device *sci_dev = _dev;

	if (is_remote_device_ready(sci_dev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&sci_dev->sm, SCI_DEV_READY);
}

static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct scic_sds_remote_device *sci_dev = _dev;
	struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);
	struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;

	/* For NCQ operation we do not issue a isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (sci_dev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(scic_to_ihost(scic), idev);
}

static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&sci_dev->sm, SCI_DEV_STOPPED);
}
/**
 * scic_remote_device_destruct() - free remote node context and destruct
 * @sci_dev: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource. As such, they must be
 * protected. Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant. The return value indicates whether the device was
 * successfully destructed or whether some failure occurred. SCI_SUCCESS is
 * returned if the device is successfully destructed.
 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied device isn't
 * valid (e.g. it's already been destroyed, the handle isn't valid, etc.).
 */
static enum sci_status scic_remote_device_destruct(struct scic_sds_remote_device *sci_dev)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;
	struct scic_sds_controller *scic;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	scic = sci_dev->owning_port->owning_controller;
	scic_sds_controller_free_remote_node_context(scic, sci_dev,
						     sci_dev->rnc.remote_node_index);
	sci_dev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}
/**
 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device to be freed.
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding io's. All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * io requests in process */
	while (!list_empty(&idev->reqs_in_process)) {
		dev_err(&ihost->pdev->dev,
			"%s: ** request list not empty! **\n", __func__);
		BUG();
	}

	scic_remote_device_destruct(&idev->sci);
	idev->domain_dev->lldd_dev = NULL;
	idev->domain_dev = NULL;
	idev->isci_port = NULL;
	list_del_init(&idev->node);

	clear_bit(IDEV_START_PENDING, &idev->flags);
	clear_bit(IDEV_STOP_PENDING, &idev->flags);
	clear_bit(IDEV_EH, &idev->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_remote_device_stop_complete() - This function is called by the scic
 *    when the remote device stop has completed. We mark the isci device as
 *    not ready and remove the isci remote device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 */
static void isci_remote_device_stop_complete(struct isci_host *ihost,
					     struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev, "%s: complete idev = %p\n", __func__, idev);

	isci_remote_device_change_state(idev, isci_stopped);

	/* after stop, we can tear down resources. */
	isci_remote_device_deconstruct(ihost, idev);
}
static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
	struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);
	u32 prev_state;

	/* If we are entering from the stopping state let the SCI User know that
	 * the stop operation has completed.
	 */
	prev_state = sci_dev->sm.previous_state_id;
	if (prev_state == SCI_DEV_STOPPING)
		isci_remote_device_stop_complete(scic_to_ihost(scic), idev);

	scic_sds_controller_remote_device_stopped(scic, sci_dev);
}

static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}

static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
	struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);
	struct domain_device *dev = idev->domain_dev;

	scic->remote_device_sequence[sci_dev->rnc.remote_node_index]++;

	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&sci_dev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev)) {
		sci_change_state(&sci_dev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(scic_to_ihost(scic), idev);
}

static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct domain_device *dev = sci_dev_to_domain(sci_dev);

	if (dev->dev_type == SAS_END_DEV) {
		struct scic_sds_controller *scic = sci_dev->owning_port->owning_controller;
		struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);

		isci_remote_device_not_ready(scic_to_ihost(scic), idev,
					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
	}
}

static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);

	scic_sds_remote_node_context_suspend(
		&sci_dev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
}

static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);

	scic_sds_remote_node_context_resume(&sci_dev->rnc, NULL, NULL);
}

static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);

	sci_dev->working_request = NULL;
	if (scic_sds_remote_node_context_is_ready(&sci_dev->rnc)) {
		/*
		 * Since the RNC is ready, it's alright to finish completion
		 * processing (e.g. signal the remote device is ready). */
		scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(sci_dev);
	} else {
		scic_sds_remote_node_context_resume(&sci_dev->rnc,
			scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler,
			sci_dev);
	}
}

static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);

	BUG_ON(sci_dev->working_request == NULL);

	isci_remote_device_not_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev),
				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}

static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);
	struct isci_remote_device *idev = sci_dev_to_idev(sci_dev);

	if (sci_dev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
		isci_remote_device_not_ready(scic_to_ihost(scic), idev,
					     sci_dev->not_ready_reason);
}

static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);

	isci_remote_device_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev));
}

static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
	struct scic_sds_controller *scic = scic_sds_remote_device_get_controller(sci_dev);

	BUG_ON(sci_dev->working_request == NULL);

	isci_remote_device_not_ready(scic_to_ihost(scic), sci_dev_to_idev(sci_dev),
				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}

static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);

	sci_dev->working_request = NULL;
}
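
/* State transition table for the remote device state machine; entries with no
 * handlers rely entirely on the default behavior of the base state machine.
 */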
static const struct sci_base_state scic_sds_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = scic_sds_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = scic_sds_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = scic_sds_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = scic_sds_remote_device_ready_state_enter,
		.exit_state  = scic_sds_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter,
		.exit_state  = scic_sds_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = scic_sds_remote_device_resetting_state_enter,
		.exit_state  = scic_sds_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};
/**
 * scic_remote_device_construct() - common construction
 * @sci_port: SAS/SATA port through which this device is accessed.
 * @sci_dev: remote device to construct
 *
 * This routine just performs benign initialization and does not
 * allocate the remote_node_context which is left to
 * scic_remote_device_[de]a_construct(). scic_remote_device_destruct()
 * frees the remote_node_context(s) for the device.
 */
static void scic_remote_device_construct(struct scic_sds_port *sci_port,
					 struct scic_sds_remote_device *sci_dev)
{
	sci_dev->owning_port = sci_port;
	sci_dev->started_request_count = 0;

	sci_init_sm(&sci_dev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL);

	scic_sds_remote_node_context_construct(&sci_dev->rnc,
					       SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/**
 * scic_remote_device_da_construct() - construct direct attached device.
 *
 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
 * the device is known to the SCI Core since it is contained in the
 * scic_phy object. Remote node context(s) is/are a global resource
 * allocated by this routine, freed by scic_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status scic_remote_device_da_construct(struct scic_sds_port *sci_port,
						       struct scic_sds_remote_device *sci_dev)
{
	enum sci_status status;
	struct domain_device *dev = sci_dev_to_domain(sci_dev);

	scic_remote_device_construct(sci_port, sci_dev);

	/*
	 * This information is requested to determine how many remote node
	 * context entries will be needed to store the remote node.
	 */
	sci_dev->is_direct_attached = true;
	status = scic_sds_controller_allocate_remote_node_context(sci_port->owning_controller,
								  sci_dev,
								  &sci_dev->rnc.remote_node_index);

	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	sci_dev->connection_rate = scic_sds_port_get_max_allowed_speed(sci_port);

	/* @todo Should I assign the port width by reading all of the phys on the port? */
	sci_dev->device_port_width = 1;

	return SCI_SUCCESS;
}
/**
 * scic_remote_device_ea_construct() - construct expander attached device
 *
 * Remote node context(s) is/are a global resource allocated by this
 * routine, freed by scic_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status scic_remote_device_ea_construct(struct scic_sds_port *sci_port,
						       struct scic_sds_remote_device *sci_dev)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);
	enum sci_status status;

	scic_remote_device_construct(sci_port, sci_dev);

	status = scic_sds_controller_allocate_remote_node_context(sci_port->owning_controller,
								  sci_dev,
								  &sci_dev->rnc.remote_node_index);
	if (status != SCI_SUCCESS)
		return status;

	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	/*
	 * For SAS-2 the physical link rate is actually a logical link
	 * rate that incorporates multiplexing. The SCU doesn't
	 * incorporate multiplexing and for the purposes of the
	 * connection the logical link rate is the same as the
	 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
	 * one another, so this code works for both situations. */
	sci_dev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(sci_port),
					 dev->linkrate);

	/* @todo Should I assign the port width by reading all of the phys on the port? */
	sci_dev->device_port_width = 1;

	return SCI_SUCCESS;
}
/**
 * scic_remote_device_start() - This method will start the supplied remote
 *    device. This method enables normal IO requests to flow through to the
 *    remote device.
 * @sci_dev: This parameter specifies the device to be started.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    start operation should complete.
 *
 * An indication of whether the device was successfully started. SCI_SUCCESS
 * is returned if the device was successfully started.
 * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start the
 * device when there have been no phys added to it.
 */
static enum sci_status scic_remote_device_start(struct scic_sds_remote_device *sci_dev,
						u32 timeout)
{
	struct sci_base_state_machine *sm = &sci_dev->sm;
	enum scic_sds_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(sci_dev), "%s: in wrong state: %d\n",
			 __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_node_context_resume(&sci_dev->rnc,
						     remote_device_resume_done,
						     sci_dev);
	if (status != SCI_SUCCESS)
		return status;

	sci_change_state(sm, SCI_DEV_STARTING);

	return SCI_SUCCESS;
}

static enum sci_status isci_remote_device_construct(struct isci_port *iport,
						    struct isci_remote_device *idev)
{
	struct scic_sds_port *sci_port = &iport->sci;
	struct isci_host *ihost = iport->isci_host;
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	if (dev->parent && dev_is_expander(dev->parent))
		status = scic_remote_device_ea_construct(sci_port, &idev->sci);
	else
		status = scic_remote_device_da_construct(sci_port, &idev->sci);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
			__func__, status);
		return status;
	}

	/* start the device. */
	status = scic_remote_device_start(&idev->sci, ISCI_REMOTE_DEVICE_START_TIMEOUT);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
			 status);

	return status;
}
void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
{
	DECLARE_COMPLETION_ONSTACK(aborted_task_completion);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	/* Cleanup all requests pending for this device. */
	isci_terminate_pending_requests(ihost, idev, terminating);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p, done\n", __func__, idev);
}

/**
 * isci_remote_device_alloc() - This function builds the isci_remote_device
 *    when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * pointer to new isci_remote_device.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
	struct isci_remote_device *idev;
	int i;

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		idev = &ihost->devices[i];
		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
			break;
	}

	if (i >= SCI_MAX_REMOTE_DEVICES) {
		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
		return NULL;
	}

	if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
		return NULL;

	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
		return NULL;

	isci_remote_device_change_state(idev, isci_freed);

	return idev;
}
/**
 * isci_remote_device_stop() - This function is called internally to stop the
 *    remote device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 *
 * The status of the scic request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	enum sci_status status;
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	isci_remote_device_change_state(idev, isci_stopping);

	/* Kill all outstanding requests. */
	isci_remote_device_nuke_requests(ihost, idev);

	set_bit(IDEV_STOP_PENDING, &idev->flags);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = scic_remote_device_stop(&idev->sci, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the stop complete callback. */
	if (status == SCI_SUCCESS) {
		wait_for_device_stop(ihost, idev);
		clear_bit(IDEV_ALLOCATED, &idev->flags);
	}

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p - after completion wait\n",
		__func__, idev);

	return status;
}
/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 *    device is removed.
 * @dev: This parameter specifies the libsas domain device.
 */
void isci_remote_device_gone(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev = dev->lldd_dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
		__func__, dev, idev, idev->isci_port);

	isci_remote_device_stop(ihost, idev);
}

/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 *    device is discovered. A remote device object is created and started.
 *    The function then sleeps until the sci core device started message is
 *    received.
 * @domain_dev: This parameter specifies the libsas domain device.
 *
 * status, zero indicates success.
 */
int isci_remote_device_found(struct domain_device *domain_dev)
{
	struct isci_host *isci_host = dev_to_ihost(domain_dev);
	struct isci_port *isci_port;
	struct isci_phy *isci_phy;
	struct asd_sas_port *sas_port;
	struct asd_sas_phy *sas_phy;
	struct isci_remote_device *isci_device;
	enum sci_status status;

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device = %p\n", __func__, domain_dev);

	wait_for_start(isci_host);

	sas_port = domain_dev->port;
	sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
				   port_phy_el);
	isci_phy = to_isci_phy(sas_phy);
	isci_port = isci_phy->isci_port;

	/* we are being called for a device on this port,
	 * so it has to come up eventually
	 */
	wait_for_completion(&isci_port->start_complete);

	if ((isci_stopping == isci_port_get_state(isci_port)) ||
	    (isci_stopped == isci_port_get_state(isci_port)))
		return -ENODEV;

	isci_device = isci_remote_device_alloc(isci_host, isci_port);
	if (!isci_device)
		return -ENODEV;

	INIT_LIST_HEAD(&isci_device->node);
	domain_dev->lldd_dev = isci_device;
	isci_device->domain_dev = domain_dev;
	isci_device->isci_port = isci_port;
	isci_remote_device_change_state(isci_device, isci_starting);

	spin_lock_irq(&isci_host->scic_lock);
	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

	set_bit(IDEV_START_PENDING, &isci_device->flags);
	status = isci_remote_device_construct(isci_port, isci_device);
	spin_unlock_irq(&isci_host->scic_lock);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n",
		__func__, isci_device);

	if (status != SCI_SUCCESS) {
		spin_lock_irq(&isci_host->scic_lock);
		isci_remote_device_deconstruct(isci_host, isci_device);
		spin_unlock_irq(&isci_host->scic_lock);
		return -ENODEV;
	}

	/* wait for the device ready callback. */
	wait_for_device_start(isci_host, isci_device);

	return 0;
}
/**
 * isci_device_is_reset_pending() - This function will check if there is any
 *    pending reset condition on the device.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_device: This parameter specifies the isci remote device.
 *
 * true if there is a reset pending for the device.
 */
bool isci_device_is_reset_pending(
	struct isci_host *isci_host,
	struct isci_remote_device *isci_device)
{
	struct isci_request *isci_request;
	struct isci_request *tmp_req;
	bool reset_is_pending = false;
	unsigned long flags;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n", __func__, isci_device);

	spin_lock_irqsave(&isci_host->scic_lock, flags);

	/* Check for reset on all pending requests. */
	list_for_each_entry_safe(isci_request, tmp_req,
				 &isci_device->reqs_in_process, dev_node) {
		dev_dbg(&isci_host->pdev->dev,
			"%s: isci_device = %p request = %p\n",
			__func__, isci_device, isci_request);

		if (isci_request->ttype == io_task) {
			struct sas_task *task = isci_request_access_task(
				isci_request);

			spin_lock(&task->task_state_lock);
			if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
				reset_is_pending = true;
			spin_unlock(&task->task_state_lock);
		}
	}

	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p reset_is_pending = %d\n",
		__func__, isci_device, reset_is_pending);

	return reset_is_pending;
}
/**
 * isci_device_clear_reset_pending() - This function clears any pending reset
 *    condition flags on the device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the isci remote device.
 */
void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
{
	struct isci_request *isci_request;
	struct isci_request *tmp_req;
	unsigned long flags = 0;

	dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
		__func__, idev, ihost);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Clear reset pending on all pending requests. */
	list_for_each_entry_safe(isci_request, tmp_req,
				 &idev->reqs_in_process, dev_node) {
		dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
			__func__, idev, isci_request);

		if (isci_request->ttype == io_task) {
			unsigned long flags2;
			struct sas_task *task = isci_request_access_task(
				isci_request);

			spin_lock_irqsave(&task->task_state_lock, flags2);
			task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
			spin_unlock_irqrestore(&task->task_state_lock, flags2);
		}
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}