remote_device.c

  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * BSD LICENSE
  25. *
  26. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27. * All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions
  31. * are met:
  32. *
  33. * * Redistributions of source code must retain the above copyright
  34. * notice, this list of conditions and the following disclaimer.
  35. * * Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions and the following disclaimer in
  37. * the documentation and/or other materials provided with the
  38. * distribution.
  39. * * Neither the name of Intel Corporation nor the names of its
  40. * contributors may be used to endorse or promote products derived
  41. * from this software without specific prior written permission.
  42. *
  43. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54. */
  55. #include <scsi/sas.h>
  56. #include <linux/bitops.h>
  57. #include "isci.h"
  58. #include "port.h"
  59. #include "remote_device.h"
  60. #include "request.h"
  61. #include "remote_node_context.h"
  62. #include "scu_event_codes.h"
  63. #include "task.h"
  64. #undef C
  65. #define C(a) (#a)
  66. const char *dev_state_name(enum sci_remote_device_states state)
  67. {
  68. static const char * const strings[] = REMOTE_DEV_STATES;
  69. return strings[state];
  70. }
  71. #undef C
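/*
 * Note on the C() macro trick above: it assumes REMOTE_DEV_STATES (defined
 * in remote_device.h, not shown here) is an X-macro list that gets expanded
 * twice -- once to build enum sci_remote_device_states and once, with C(a)
 * redefined to (#a), to build the matching string table used by
 * dev_state_name(). A minimal, self-contained sketch of the same pattern,
 * using hypothetical names:
 */
#define EXAMPLE_STATES {		\
	C(EX_DEV_INITIAL),		\
	C(EX_DEV_STOPPED),		\
	C(EX_DEV_READY),		\
	}
#undef C
#define C(a) a
enum example_states EXAMPLE_STATES;
#undef C
#define C(a) (#a)
static const char *example_state_name(enum example_states state)
{
	static const char * const strings[] = EXAMPLE_STATES;

	return strings[state];	/* e.g. EX_DEV_READY -> "EX_DEV_READY" */
}
#undef C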
  72. static enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
  73. enum sci_remote_node_suspension_reasons reason)
  74. {
  75. return sci_remote_node_context_suspend(&idev->rnc, reason,
  76. SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
  77. }
  78. /**
  79. * isci_remote_device_ready() - This function is called by the ihost when the
  80. * remote device is ready. We mark the isci device as ready and signal the
  81. * waiting process.
  82. * @ihost: our valid isci_host
  83. * @idev: remote device
  84. *
  85. */
  86. static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
  87. {
  88. dev_dbg(&ihost->pdev->dev,
  89. "%s: idev = %p\n", __func__, idev);
  90. clear_bit(IDEV_IO_NCQERROR, &idev->flags);
  91. set_bit(IDEV_IO_READY, &idev->flags);
  92. if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
  93. wake_up(&ihost->eventq);
  94. }
  95. static enum sci_status sci_remote_device_terminate_req(
  96. struct isci_host *ihost,
  97. struct isci_remote_device *idev,
  98. int check_abort,
  99. struct isci_request *ireq)
  100. {
  101. if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
  102. (ireq->target_device != idev) ||
  103. (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
  104. return SCI_SUCCESS;
  105. dev_dbg(&ihost->pdev->dev,
  106. "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
  107. __func__, idev, idev->flags, ireq, ireq->target_device);
  108. set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
  109. return sci_controller_terminate_request(ihost, idev, ireq);
  110. }
  111. static enum sci_status sci_remote_device_terminate_reqs_checkabort(
  112. struct isci_remote_device *idev,
  113. int chk)
  114. {
  115. struct isci_host *ihost = idev->owning_port->owning_controller;
  116. enum sci_status status = SCI_SUCCESS;
  117. u32 i;
  118. for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
  119. struct isci_request *ireq = ihost->reqs[i];
  120. enum sci_status s;
  121. s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
  122. if (s != SCI_SUCCESS)
  123. status = s;
  124. }
  125. return status;
  126. }
  127. static bool isci_compare_suspendcount(
  128. struct isci_remote_device *idev,
  129. u32 localcount)
  130. {
  131. smp_rmb();
  132. return localcount != idev->rnc.suspend_count;
  133. }
  134. static bool isci_check_reqterm(
  135. struct isci_host *ihost,
  136. struct isci_remote_device *idev,
  137. struct isci_request *ireq,
  138. u32 localcount)
  139. {
  140. unsigned long flags;
  141. bool res;
  142. spin_lock_irqsave(&ihost->scic_lock, flags);
  143. res = isci_compare_suspendcount(idev, localcount)
  144. && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
  145. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  146. return res;
  147. }
  148. static bool isci_check_devempty(
  149. struct isci_host *ihost,
  150. struct isci_remote_device *idev,
  151. u32 localcount)
  152. {
  153. unsigned long flags;
  154. bool res;
  155. spin_lock_irqsave(&ihost->scic_lock, flags);
  156. res = isci_compare_suspendcount(idev, localcount)
  157. && idev->started_request_count == 0;
  158. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  159. return res;
  160. }
  161. enum sci_status isci_remote_device_terminate_requests(
  162. struct isci_host *ihost,
  163. struct isci_remote_device *idev,
  164. struct isci_request *ireq)
  165. {
  166. enum sci_status status = SCI_SUCCESS;
  167. unsigned long flags;
  168. u32 rnc_suspend_count;
  169. spin_lock_irqsave(&ihost->scic_lock, flags);
  170. if (isci_get_device(idev) == NULL) {
  171. dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
  172. __func__, idev);
  173. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  174. status = SCI_FAILURE;
  175. } else {
  176. /* If already suspended, don't wait for another suspension. */
  177. smp_rmb();
  178. rnc_suspend_count
  179. = sci_remote_node_context_is_suspended(&idev->rnc)
  180. ? 0 : idev->rnc.suspend_count;
  181. dev_dbg(&ihost->pdev->dev,
  182. "%s: idev=%p, ireq=%p; started_request_count=%d, "
  183. "rnc_suspend_count=%d, rnc.suspend_count=%d"
  184. "about to wait\n",
  185. __func__, idev, ireq, idev->started_request_count,
  186. rnc_suspend_count, idev->rnc.suspend_count);
  187. if (ireq) {
  188. /* Terminate a specific TC. */
  189. set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
  190. sci_remote_device_terminate_req(ihost, idev, 0, ireq);
  191. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  192. wait_event(ihost->eventq,
  193. isci_check_reqterm(ihost, idev, ireq,
  194. rnc_suspend_count));
  195. clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
  196. isci_free_tag(ihost, ireq->io_tag);
  197. } else {
  198. /* Terminate all TCs. */
  199. sci_remote_device_terminate_requests(idev);
  200. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  201. wait_event(ihost->eventq,
  202. isci_check_devempty(ihost, idev,
  203. rnc_suspend_count));
  204. }
  205. dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
  206. __func__, idev);
  207. isci_put_device(idev);
  208. }
  209. return status;
  210. }
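/*
 * The function above follows a common quiescing pattern: snapshot the RNC
 * suspend count under scic_lock, kick off the termination, then sleep on
 * ihost->eventq until the predicate helpers (isci_check_reqterm() /
 * isci_check_devempty()) observe both the suspend-count change and the
 * drained request state. A minimal sketch of that pattern in isolation,
 * with hypothetical names (example_dev, example_quiesce, ...):
 */
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_dev {
	spinlock_t lock;
	wait_queue_head_t eventq;
	u32 suspend_count;	/* bumped by the completion/interrupt path */
	u32 active_requests;	/* decremented as requests terminate */
};

static bool example_drained(struct example_dev *edev, u32 old_suspend_count)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&edev->lock, flags);
	done = edev->suspend_count != old_suspend_count &&
	       edev->active_requests == 0;
	spin_unlock_irqrestore(&edev->lock, flags);
	return done;
}

static void example_quiesce(struct example_dev *edev)
{
	unsigned long flags;
	u32 old;

	spin_lock_irqsave(&edev->lock, flags);
	old = edev->suspend_count;	/* snapshot before starting */
	/* ... start terminating the outstanding requests here ... */
	spin_unlock_irqrestore(&edev->lock, flags);

	/* completion paths call wake_up(&edev->eventq) after updating state */
	wait_event(edev->eventq, example_drained(edev, old));
}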
  211. /**
  212. * isci_remote_device_not_ready() - This function is called by the ihost when
  213. * the remote device is not ready. We mark the isci device as not ready
  214. * for I/O (clearing "ready_for_io") and signal the waiting process.
  215. * @isci_host: This parameter specifies the isci host object.
  216. * @isci_device: This parameter specifies the remote device
  217. *
  218. * sci_lock is held on entrance to this function.
  219. */
  220. static void isci_remote_device_not_ready(struct isci_host *ihost,
  221. struct isci_remote_device *idev,
  222. u32 reason)
  223. {
  224. dev_dbg(&ihost->pdev->dev,
  225. "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
  226. switch (reason) {
  227. case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
  228. set_bit(IDEV_IO_NCQERROR, &idev->flags);
  229. /* Suspend the remote device so the I/O can be terminated. */
  230. sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
  231. /* Kill all outstanding requests for the device. */
  232. sci_remote_device_terminate_requests(idev);
  233. /* Fall through into the default case... */
  234. default:
  235. clear_bit(IDEV_IO_READY, &idev->flags);
  236. break;
  237. }
  238. }
  239. /* called once the remote node context is ready to be freed.
  240. * The remote device can now report that its stop operation is complete.
  241. */
  242. static void rnc_destruct_done(void *_dev)
  243. {
  244. struct isci_remote_device *idev = _dev;
  245. BUG_ON(idev->started_request_count != 0);
  246. sci_change_state(&idev->sm, SCI_DEV_STOPPED);
  247. }
  248. enum sci_status sci_remote_device_terminate_requests(
  249. struct isci_remote_device *idev)
  250. {
  251. return sci_remote_device_terminate_reqs_checkabort(idev, 0);
  252. }
  253. enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
  254. u32 timeout)
  255. {
  256. struct sci_base_state_machine *sm = &idev->sm;
  257. enum sci_remote_device_states state = sm->current_state_id;
  258. switch (state) {
  259. case SCI_DEV_INITIAL:
  260. case SCI_DEV_FAILED:
  261. case SCI_DEV_FINAL:
  262. default:
  263. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  264. __func__, dev_state_name(state));
  265. return SCI_FAILURE_INVALID_STATE;
  266. case SCI_DEV_STOPPED:
  267. return SCI_SUCCESS;
  268. case SCI_DEV_STARTING:
  269. /* device not started so there had better be no requests */
  270. BUG_ON(idev->started_request_count != 0);
  271. sci_remote_node_context_destruct(&idev->rnc,
  272. rnc_destruct_done, idev);
  273. /* Transition to the stopping state and wait for the
  274. * remote node to complete being posted and invalidated.
  275. */
  276. sci_change_state(sm, SCI_DEV_STOPPING);
  277. return SCI_SUCCESS;
  278. case SCI_DEV_READY:
  279. case SCI_STP_DEV_IDLE:
  280. case SCI_STP_DEV_CMD:
  281. case SCI_STP_DEV_NCQ:
  282. case SCI_STP_DEV_NCQ_ERROR:
  283. case SCI_STP_DEV_AWAIT_RESET:
  284. case SCI_SMP_DEV_IDLE:
  285. case SCI_SMP_DEV_CMD:
  286. sci_change_state(sm, SCI_DEV_STOPPING);
  287. if (idev->started_request_count == 0)
  288. sci_remote_node_context_destruct(&idev->rnc,
  289. rnc_destruct_done,
  290. idev);
  291. else {
  292. sci_remote_device_suspend(
  293. idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
  294. sci_remote_device_terminate_requests(idev);
  295. }
  296. return SCI_SUCCESS;
  297. case SCI_DEV_STOPPING:
  298. /* All requests should have been terminated, but if there is an
  299. * attempt to stop a device already in the stopping state, then
  300. * try again to terminate.
  301. */
  302. return sci_remote_device_terminate_requests(idev);
  303. case SCI_DEV_RESETTING:
  304. sci_change_state(sm, SCI_DEV_STOPPING);
  305. return SCI_SUCCESS;
  306. }
  307. }
  308. enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
  309. {
  310. struct sci_base_state_machine *sm = &idev->sm;
  311. enum sci_remote_device_states state = sm->current_state_id;
  312. switch (state) {
  313. case SCI_DEV_INITIAL:
  314. case SCI_DEV_STOPPED:
  315. case SCI_DEV_STARTING:
  316. case SCI_SMP_DEV_IDLE:
  317. case SCI_SMP_DEV_CMD:
  318. case SCI_DEV_STOPPING:
  319. case SCI_DEV_FAILED:
  320. case SCI_DEV_RESETTING:
  321. case SCI_DEV_FINAL:
  322. default:
  323. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  324. __func__, dev_state_name(state));
  325. return SCI_FAILURE_INVALID_STATE;
  326. case SCI_DEV_READY:
  327. case SCI_STP_DEV_IDLE:
  328. case SCI_STP_DEV_CMD:
  329. case SCI_STP_DEV_NCQ:
  330. case SCI_STP_DEV_NCQ_ERROR:
  331. case SCI_STP_DEV_AWAIT_RESET:
  332. sci_change_state(sm, SCI_DEV_RESETTING);
  333. return SCI_SUCCESS;
  334. }
  335. }
  336. enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
  337. {
  338. struct sci_base_state_machine *sm = &idev->sm;
  339. enum sci_remote_device_states state = sm->current_state_id;
  340. if (state != SCI_DEV_RESETTING) {
  341. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  342. __func__, dev_state_name(state));
  343. return SCI_FAILURE_INVALID_STATE;
  344. }
  345. sci_change_state(sm, SCI_DEV_READY);
  346. return SCI_SUCCESS;
  347. }
  348. enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
  349. u32 frame_index)
  350. {
  351. struct sci_base_state_machine *sm = &idev->sm;
  352. enum sci_remote_device_states state = sm->current_state_id;
  353. struct isci_host *ihost = idev->owning_port->owning_controller;
  354. enum sci_status status;
  355. switch (state) {
  356. case SCI_DEV_INITIAL:
  357. case SCI_DEV_STOPPED:
  358. case SCI_DEV_STARTING:
  359. case SCI_STP_DEV_IDLE:
  360. case SCI_SMP_DEV_IDLE:
  361. case SCI_DEV_FINAL:
  362. default:
  363. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  364. __func__, dev_state_name(state));
  365. /* Return the frame back to the controller */
  366. sci_controller_release_frame(ihost, frame_index);
  367. return SCI_FAILURE_INVALID_STATE;
  368. case SCI_DEV_READY:
  369. case SCI_STP_DEV_NCQ_ERROR:
  370. case SCI_STP_DEV_AWAIT_RESET:
  371. case SCI_DEV_STOPPING:
  372. case SCI_DEV_FAILED:
  373. case SCI_DEV_RESETTING: {
  374. struct isci_request *ireq;
  375. struct ssp_frame_hdr hdr;
  376. void *frame_header;
  377. ssize_t word_cnt;
  378. status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
  379. frame_index,
  380. &frame_header);
  381. if (status != SCI_SUCCESS)
  382. return status;
  383. word_cnt = sizeof(hdr) / sizeof(u32);
  384. sci_swab32_cpy(&hdr, frame_header, word_cnt);
  385. ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
  386. if (ireq && ireq->target_device == idev) {
  387. /* The IO request is now in charge of releasing the frame */
  388. status = sci_io_request_frame_handler(ireq, frame_index);
  389. } else {
  390. /* We could not map this tag to a valid IO
  391. * request. Just toss the frame and continue
  392. */
  393. sci_controller_release_frame(ihost, frame_index);
  394. }
  395. break;
  396. }
  397. case SCI_STP_DEV_NCQ: {
  398. struct dev_to_host_fis *hdr;
  399. status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
  400. frame_index,
  401. (void **)&hdr);
  402. if (status != SCI_SUCCESS)
  403. return status;
  404. if (hdr->fis_type == FIS_SETDEVBITS &&
  405. (hdr->status & ATA_ERR)) {
  406. idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
  407. /* TODO Check sactive and complete associated IO if any. */
  408. sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
  409. } else if (hdr->fis_type == FIS_REGD2H &&
  410. (hdr->status & ATA_ERR)) {
  411. /*
  412. * Some devices return D2H FIS when an NCQ error is detected.
  413. * Treat it like an SDB error FIS and report the same not-ready reason.
  414. */
  415. idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
  416. sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
  417. } else
  418. status = SCI_FAILURE;
  419. sci_controller_release_frame(ihost, frame_index);
  420. break;
  421. }
  422. case SCI_STP_DEV_CMD:
  423. case SCI_SMP_DEV_CMD:
  424. /* The device object does not process any UF received from the hardware
  425. * while in this state; all unsolicited frames are forwarded to the io
  426. * request object.
  427. */
  428. status = sci_io_request_frame_handler(idev->working_request, frame_index);
  429. break;
  430. }
  431. return status;
  432. }
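/*
 * Note on the SSP header copy above: word_cnt is computed in 32-bit words
 * and sci_swab32_cpy() (from isci.h) copies the header out of the
 * unsolicited frame buffer with a per-word byte swap, after which fields
 * such as hdr.tag are read with be16_to_cpu(). A sketch of what such a
 * word-wise swab copy looks like (hypothetical name; assumed to mirror the
 * real helper):
 */
#include <linux/swab.h>
#include <linux/types.h>

static inline void example_swab32_cpy(void *_dest, const void *_src,
				      ssize_t word_cnt)
{
	u32 *dest = _dest;
	const u32 *src = _src;

	while (--word_cnt >= 0)
		dest[word_cnt] = swab32(src[word_cnt]);
}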
  433. static bool is_remote_device_ready(struct isci_remote_device *idev)
  434. {
  435. struct sci_base_state_machine *sm = &idev->sm;
  436. enum sci_remote_device_states state = sm->current_state_id;
  437. switch (state) {
  438. case SCI_DEV_READY:
  439. case SCI_STP_DEV_IDLE:
  440. case SCI_STP_DEV_CMD:
  441. case SCI_STP_DEV_NCQ:
  442. case SCI_STP_DEV_NCQ_ERROR:
  443. case SCI_STP_DEV_AWAIT_RESET:
  444. case SCI_SMP_DEV_IDLE:
  445. case SCI_SMP_DEV_CMD:
  446. return true;
  447. default:
  448. return false;
  449. }
  450. }
  451. /*
  452. * called once the remote node context has transitioned to a ready
  453. * state (after suspending RX and/or TX due to early D2H fis)
  454. */
  455. static void atapi_remote_device_resume_done(void *_dev)
  456. {
  457. struct isci_remote_device *idev = _dev;
  458. struct isci_request *ireq = idev->working_request;
  459. sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
  460. }
  461. enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
  462. u32 event_code)
  463. {
  464. enum sci_status status;
  465. switch (scu_get_event_type(event_code)) {
  466. case SCU_EVENT_TYPE_RNC_OPS_MISC:
  467. case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
  468. case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
  469. status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
  470. break;
  471. case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
  472. if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
  473. status = SCI_SUCCESS;
  474. /* Suspend the associated RNC */
  475. sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
  476. dev_dbg(scirdev_to_dev(idev),
  477. "%s: device: %p event code: %x: %s\n",
  478. __func__, idev, event_code,
  479. is_remote_device_ready(idev)
  480. ? "I_T_Nexus_Timeout event"
  481. : "I_T_Nexus_Timeout event in wrong state");
  482. break;
  483. }
  484. /* Else, fall through and treat as unhandled... */
  485. default:
  486. dev_dbg(scirdev_to_dev(idev),
  487. "%s: device: %p event code: %x: %s\n",
  488. __func__, idev, event_code,
  489. is_remote_device_ready(idev)
  490. ? "unexpected event"
  491. : "unexpected event in wrong state");
  492. status = SCI_FAILURE_INVALID_STATE;
  493. break;
  494. }
  495. if (status != SCI_SUCCESS)
  496. return status;
  497. return status;
  498. }
  499. static void sci_remote_device_start_request(struct isci_remote_device *idev,
  500. struct isci_request *ireq,
  501. enum sci_status status)
  502. {
  503. struct isci_port *iport = idev->owning_port;
  504. /* cleanup requests that failed after starting on the port */
  505. if (status != SCI_SUCCESS)
  506. sci_port_complete_io(iport, idev, ireq);
  507. else {
  508. kref_get(&idev->kref);
  509. idev->started_request_count++;
  510. }
  511. }
  512. enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
  513. struct isci_remote_device *idev,
  514. struct isci_request *ireq)
  515. {
  516. struct sci_base_state_machine *sm = &idev->sm;
  517. enum sci_remote_device_states state = sm->current_state_id;
  518. struct isci_port *iport = idev->owning_port;
  519. enum sci_status status;
  520. switch (state) {
  521. case SCI_DEV_INITIAL:
  522. case SCI_DEV_STOPPED:
  523. case SCI_DEV_STARTING:
  524. case SCI_STP_DEV_NCQ_ERROR:
  525. case SCI_DEV_STOPPING:
  526. case SCI_DEV_FAILED:
  527. case SCI_DEV_RESETTING:
  528. case SCI_DEV_FINAL:
  529. default:
  530. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  531. __func__, dev_state_name(state));
  532. return SCI_FAILURE_INVALID_STATE;
  533. case SCI_DEV_READY:
  534. /* attempt to start an io request for this device object. The remote
  535. * device object will issue the start request for the io and if
  536. * successful it will start the request for the port object then
  537. * increment its own request count.
  538. */
  539. status = sci_port_start_io(iport, idev, ireq);
  540. if (status != SCI_SUCCESS)
  541. return status;
  542. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  543. if (status != SCI_SUCCESS)
  544. break;
  545. status = sci_request_start(ireq);
  546. break;
  547. case SCI_STP_DEV_IDLE: {
  548. /* Handle the start io operation for a SATA device that is in
  549. * the command idle state: evaluate the type of IO request to
  550. * be started; if it is an NCQ request, change to the NCQ substate;
  551. * if it is any other command, change to the CMD substate.
  552. *
  553. * If this is a softreset we may want to have a different
  554. * substate.
  555. */
  556. enum sci_remote_device_states new_state;
  557. struct sas_task *task = isci_request_access_task(ireq);
  558. status = sci_port_start_io(iport, idev, ireq);
  559. if (status != SCI_SUCCESS)
  560. return status;
  561. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  562. if (status != SCI_SUCCESS)
  563. break;
  564. status = sci_request_start(ireq);
  565. if (status != SCI_SUCCESS)
  566. break;
  567. if (task->ata_task.use_ncq)
  568. new_state = SCI_STP_DEV_NCQ;
  569. else {
  570. idev->working_request = ireq;
  571. new_state = SCI_STP_DEV_CMD;
  572. }
  573. sci_change_state(sm, new_state);
  574. break;
  575. }
  576. case SCI_STP_DEV_NCQ: {
  577. struct sas_task *task = isci_request_access_task(ireq);
  578. if (task->ata_task.use_ncq) {
  579. status = sci_port_start_io(iport, idev, ireq);
  580. if (status != SCI_SUCCESS)
  581. return status;
  582. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  583. if (status != SCI_SUCCESS)
  584. break;
  585. status = sci_request_start(ireq);
  586. } else
  587. return SCI_FAILURE_INVALID_STATE;
  588. break;
  589. }
  590. case SCI_STP_DEV_AWAIT_RESET:
  591. return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
  592. case SCI_SMP_DEV_IDLE:
  593. status = sci_port_start_io(iport, idev, ireq);
  594. if (status != SCI_SUCCESS)
  595. return status;
  596. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  597. if (status != SCI_SUCCESS)
  598. break;
  599. status = sci_request_start(ireq);
  600. if (status != SCI_SUCCESS)
  601. break;
  602. idev->working_request = ireq;
  603. sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
  604. break;
  605. case SCI_STP_DEV_CMD:
  606. case SCI_SMP_DEV_CMD:
  607. /* The device is already handling a command; it cannot accept new
  608. * commands until this one is complete.
  609. */
  610. return SCI_FAILURE_INVALID_STATE;
  611. }
  612. sci_remote_device_start_request(idev, ireq, status);
  613. return status;
  614. }
  615. static enum sci_status common_complete_io(struct isci_port *iport,
  616. struct isci_remote_device *idev,
  617. struct isci_request *ireq)
  618. {
  619. enum sci_status status;
  620. status = sci_request_complete(ireq);
  621. if (status != SCI_SUCCESS)
  622. return status;
  623. status = sci_port_complete_io(iport, idev, ireq);
  624. if (status != SCI_SUCCESS)
  625. return status;
  626. sci_remote_device_decrement_request_count(idev);
  627. return status;
  628. }
  629. enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
  630. struct isci_remote_device *idev,
  631. struct isci_request *ireq)
  632. {
  633. struct sci_base_state_machine *sm = &idev->sm;
  634. enum sci_remote_device_states state = sm->current_state_id;
  635. struct isci_port *iport = idev->owning_port;
  636. enum sci_status status;
  637. switch (state) {
  638. case SCI_DEV_INITIAL:
  639. case SCI_DEV_STOPPED:
  640. case SCI_DEV_STARTING:
  641. case SCI_STP_DEV_IDLE:
  642. case SCI_SMP_DEV_IDLE:
  643. case SCI_DEV_FAILED:
  644. case SCI_DEV_FINAL:
  645. default:
  646. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  647. __func__, dev_state_name(state));
  648. return SCI_FAILURE_INVALID_STATE;
  649. case SCI_DEV_READY:
  650. case SCI_STP_DEV_AWAIT_RESET:
  651. case SCI_DEV_RESETTING:
  652. status = common_complete_io(iport, idev, ireq);
  653. break;
  654. case SCI_STP_DEV_CMD:
  655. case SCI_STP_DEV_NCQ:
  656. case SCI_STP_DEV_NCQ_ERROR:
  657. case SCI_STP_DEV_ATAPI_ERROR:
  658. status = common_complete_io(iport, idev, ireq);
  659. if (status != SCI_SUCCESS)
  660. break;
  661. if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
  662. /* This request hit a hardware error; the device needs a LUN reset.
  663. * Force the state machine to the AWAIT_RESET state so the remaining
  664. * IOs reach the RNC state handler and are completed by the RNC with
  665. * a status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
  666. */
  667. sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
  668. } else if (idev->started_request_count == 0)
  669. sci_change_state(sm, SCI_STP_DEV_IDLE);
  670. break;
  671. case SCI_SMP_DEV_CMD:
  672. status = common_complete_io(iport, idev, ireq);
  673. if (status != SCI_SUCCESS)
  674. break;
  675. sci_change_state(sm, SCI_SMP_DEV_IDLE);
  676. break;
  677. case SCI_DEV_STOPPING:
  678. status = common_complete_io(iport, idev, ireq);
  679. if (status != SCI_SUCCESS)
  680. break;
  681. if (idev->started_request_count == 0)
  682. sci_remote_node_context_destruct(&idev->rnc,
  683. rnc_destruct_done,
  684. idev);
  685. break;
  686. }
  687. if (status != SCI_SUCCESS)
  688. dev_err(scirdev_to_dev(idev),
  689. "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
  690. "could not complete\n", __func__, iport,
  691. idev, ireq, status);
  692. else
  693. isci_put_device(idev);
  694. return status;
  695. }
  696. static void sci_remote_device_continue_request(void *dev)
  697. {
  698. struct isci_remote_device *idev = dev;
  699. /* we need to check if this request is still valid to continue. */
  700. if (idev->working_request)
  701. sci_controller_continue_io(idev->working_request);
  702. }
  703. enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
  704. struct isci_remote_device *idev,
  705. struct isci_request *ireq)
  706. {
  707. struct sci_base_state_machine *sm = &idev->sm;
  708. enum sci_remote_device_states state = sm->current_state_id;
  709. struct isci_port *iport = idev->owning_port;
  710. enum sci_status status;
  711. switch (state) {
  712. case SCI_DEV_INITIAL:
  713. case SCI_DEV_STOPPED:
  714. case SCI_DEV_STARTING:
  715. case SCI_SMP_DEV_IDLE:
  716. case SCI_SMP_DEV_CMD:
  717. case SCI_DEV_STOPPING:
  718. case SCI_DEV_FAILED:
  719. case SCI_DEV_RESETTING:
  720. case SCI_DEV_FINAL:
  721. default:
  722. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  723. __func__, dev_state_name(state));
  724. return SCI_FAILURE_INVALID_STATE;
  725. case SCI_STP_DEV_IDLE:
  726. case SCI_STP_DEV_CMD:
  727. case SCI_STP_DEV_NCQ:
  728. case SCI_STP_DEV_NCQ_ERROR:
  729. case SCI_STP_DEV_AWAIT_RESET:
  730. status = sci_port_start_io(iport, idev, ireq);
  731. if (status != SCI_SUCCESS)
  732. return status;
  733. status = sci_request_start(ireq);
  734. if (status != SCI_SUCCESS)
  735. goto out;
  736. /* Note: If the remote device state is not IDLE this will
  737. * replace the request that probably resulted in the task
  738. * management request.
  739. */
  740. idev->working_request = ireq;
  741. sci_change_state(sm, SCI_STP_DEV_CMD);
  742. /* The remote node context must cleanup the TCi to NCQ mapping
  743. * table. The only way to do this correctly is to either write
  744. * to the TLCR register or to invalidate and repost the RNC. In
  745. * either case the remote node context state machine will take
  746. * the correct action when the remote node context is suspended
  747. * and later resumed.
  748. */
  749. sci_remote_device_suspend(idev,
  750. SCI_SW_SUSPEND_LINKHANG_DETECT);
  751. status = sci_remote_node_context_start_task(&idev->rnc, ireq,
  752. sci_remote_device_continue_request, idev);
  753. out:
  754. sci_remote_device_start_request(idev, ireq, status);
  755. /* We need to let the controller start request handler know that
  756. * it can't post TC yet. We will provide a callback function to
  757. * post TC when RNC gets resumed.
  758. */
  759. return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
  760. case SCI_DEV_READY:
  761. status = sci_port_start_io(iport, idev, ireq);
  762. if (status != SCI_SUCCESS)
  763. return status;
  764. /* Resume the RNC as needed: */
  765. status = sci_remote_node_context_start_task(&idev->rnc, ireq,
  766. NULL, NULL);
  767. if (status != SCI_SUCCESS)
  768. break;
  769. status = sci_request_start(ireq);
  770. break;
  771. }
  772. sci_remote_device_start_request(idev, ireq, status);
  773. return status;
  774. }
  775. void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
  776. {
  777. struct isci_port *iport = idev->owning_port;
  778. u32 context;
  779. context = request |
  780. (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  781. (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
  782. idev->rnc.remote_node_index;
  783. sci_controller_post_request(iport->owning_controller, context);
  784. }
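/*
 * The post context built above is plain bit-field packing into one u32:
 * the request code OR'd with the protocol engine group, the logical port
 * index, and the remote node index, each shifted into its field. The shift
 * values below are made up for illustration; the real
 * SCU_CONTEXT_COMMAND_* shifts live in the SCU hardware headers.
 */
#include <linux/types.h>

#define EX_PEG_SHIFT	28	/* hypothetical field positions */
#define EX_PORT_SHIFT	24
#define EX_RNI_SHIFT	0

static u32 example_build_post_context(u32 request, u32 peg, u32 port_index,
				      u32 remote_node_index)
{
	return request |
	       (peg << EX_PEG_SHIFT) |
	       (port_index << EX_PORT_SHIFT) |
	       (remote_node_index << EX_RNI_SHIFT);
}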
  785. /* called once the remote node context has transitioned to a
  786. * ready state. This is the indication that the remote device object can also
  787. * transition to ready.
  788. */
  789. static void remote_device_resume_done(void *_dev)
  790. {
  791. struct isci_remote_device *idev = _dev;
  792. if (is_remote_device_ready(idev))
  793. return;
  794. /* go 'ready' if we are not already in a ready state */
  795. sci_change_state(&idev->sm, SCI_DEV_READY);
  796. }
  797. static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
  798. {
  799. struct isci_remote_device *idev = _dev;
  800. struct isci_host *ihost = idev->owning_port->owning_controller;
  801. /* For NCQ operation we do not issue an isci_remote_device_not_ready().
  802. * As a result, avoid sending the ready notification.
  803. */
  804. if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
  805. isci_remote_device_ready(ihost, idev);
  806. }
  807. static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
  808. {
  809. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  810. /* Initial state is a transitional state to the stopped state */
  811. sci_change_state(&idev->sm, SCI_DEV_STOPPED);
  812. }
  813. /**
  814. * sci_remote_device_destruct() - free remote node context and destruct
  815. * @remote_device: This parameter specifies the remote device to be destructed.
  816. *
  817. * Remote device objects are a limited resource. As such, they must be
  818. * protected. Thus calls to construct and destruct are mutually exclusive and
  819. * non-reentrant. The return value indicates whether the device was
  820. * successfully destructed or some failure occurred: SCI_SUCCESS is
  821. * returned if the device is successfully destructed;
  822. * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied
  823. * device isn't valid (e.g. it has already been destroyed, the handle
  824. * isn't valid, etc.).
  825. */
  826. static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
  827. {
  828. struct sci_base_state_machine *sm = &idev->sm;
  829. enum sci_remote_device_states state = sm->current_state_id;
  830. struct isci_host *ihost;
  831. if (state != SCI_DEV_STOPPED) {
  832. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  833. __func__, dev_state_name(state));
  834. return SCI_FAILURE_INVALID_STATE;
  835. }
  836. ihost = idev->owning_port->owning_controller;
  837. sci_controller_free_remote_node_context(ihost, idev,
  838. idev->rnc.remote_node_index);
  839. idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
  840. sci_change_state(sm, SCI_DEV_FINAL);
  841. return SCI_SUCCESS;
  842. }
  843. /**
  844. * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
  845. * @ihost: This parameter specifies the isci host object.
  846. * @idev: This parameter specifies the remote device to be freed.
  847. *
  848. */
  849. static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
  850. {
  851. dev_dbg(&ihost->pdev->dev,
  852. "%s: isci_device = %p\n", __func__, idev);
  853. /* There should not be any outstanding io's. All paths to
  854. * here should go through isci_remote_device_nuke_requests.
  855. * If we hit this condition, we will need a way to complete
  856. * io requests in process */
  857. BUG_ON(idev->started_request_count > 0);
  858. sci_remote_device_destruct(idev);
  859. list_del_init(&idev->node);
  860. isci_put_device(idev);
  861. }
  862. static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
  863. {
  864. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  865. struct isci_host *ihost = idev->owning_port->owning_controller;
  866. u32 prev_state;
  867. /* If we are entering from the stopping state let the SCI User know that
  868. * the stop operation has completed.
  869. */
  870. prev_state = idev->sm.previous_state_id;
  871. if (prev_state == SCI_DEV_STOPPING)
  872. isci_remote_device_deconstruct(ihost, idev);
  873. sci_controller_remote_device_stopped(ihost, idev);
  874. }
  875. static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
  876. {
  877. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  878. struct isci_host *ihost = idev->owning_port->owning_controller;
  879. isci_remote_device_not_ready(ihost, idev,
  880. SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
  881. }
  882. static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
  883. {
  884. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  885. struct isci_host *ihost = idev->owning_port->owning_controller;
  886. struct domain_device *dev = idev->domain_dev;
  887. if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
  888. sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
  889. } else if (dev_is_expander(dev)) {
  890. sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
  891. } else
  892. isci_remote_device_ready(ihost, idev);
  893. }
  894. static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
  895. {
  896. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  897. struct domain_device *dev = idev->domain_dev;
  898. if (dev->dev_type == SAS_END_DEV) {
  899. struct isci_host *ihost = idev->owning_port->owning_controller;
  900. isci_remote_device_not_ready(ihost, idev,
  901. SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
  902. }
  903. }
  904. static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
  905. {
  906. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  907. struct isci_host *ihost = idev->owning_port->owning_controller;
  908. dev_dbg(&ihost->pdev->dev,
  909. "%s: isci_device = %p\n", __func__, idev);
  910. sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
  911. }
  912. static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
  913. {
  914. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  915. struct isci_host *ihost = idev->owning_port->owning_controller;
  916. dev_dbg(&ihost->pdev->dev,
  917. "%s: isci_device = %p\n", __func__, idev);
  918. sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
  919. }
  920. static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
  921. {
  922. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  923. idev->working_request = NULL;
  924. if (sci_remote_node_context_is_ready(&idev->rnc)) {
  925. /*
  926. * Since the RNC is ready, it's alright to finish completion
  927. * processing (e.g. signal the remote device is ready). */
  928. sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
  929. } else {
  930. sci_remote_node_context_resume(&idev->rnc,
  931. sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
  932. idev);
  933. }
  934. }
  935. static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
  936. {
  937. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  938. struct isci_host *ihost = idev->owning_port->owning_controller;
  939. BUG_ON(idev->working_request == NULL);
  940. isci_remote_device_not_ready(ihost, idev,
  941. SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
  942. }
  943. static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
  944. {
  945. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  946. struct isci_host *ihost = idev->owning_port->owning_controller;
  947. if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
  948. isci_remote_device_not_ready(ihost, idev,
  949. idev->not_ready_reason);
  950. }
  951. static void sci_stp_remote_device_atapi_error_substate_enter(
  952. struct sci_base_state_machine *sm)
  953. {
  954. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  955. /* This state is entered when an I/O is decoded with an error
  956. * condition. By this point the RNC expected suspension state is set.
  957. * The error conditions suspend the device, so unsuspend here if
  958. * possible.
  959. */
  960. sci_remote_node_context_resume(&idev->rnc,
  961. atapi_remote_device_resume_done,
  962. idev);
  963. }
  964. static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
  965. {
  966. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  967. struct isci_host *ihost = idev->owning_port->owning_controller;
  968. isci_remote_device_ready(ihost, idev);
  969. }
  970. static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
  971. {
  972. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  973. struct isci_host *ihost = idev->owning_port->owning_controller;
  974. BUG_ON(idev->working_request == NULL);
  975. isci_remote_device_not_ready(ihost, idev,
  976. SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
  977. }
  978. static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
  979. {
  980. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  981. idev->working_request = NULL;
  982. }
  983. static const struct sci_base_state sci_remote_device_state_table[] = {
  984. [SCI_DEV_INITIAL] = {
  985. .enter_state = sci_remote_device_initial_state_enter,
  986. },
  987. [SCI_DEV_STOPPED] = {
  988. .enter_state = sci_remote_device_stopped_state_enter,
  989. },
  990. [SCI_DEV_STARTING] = {
  991. .enter_state = sci_remote_device_starting_state_enter,
  992. },
  993. [SCI_DEV_READY] = {
  994. .enter_state = sci_remote_device_ready_state_enter,
  995. .exit_state = sci_remote_device_ready_state_exit
  996. },
  997. [SCI_STP_DEV_IDLE] = {
  998. .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
  999. },
  1000. [SCI_STP_DEV_CMD] = {
  1001. .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
  1002. },
  1003. [SCI_STP_DEV_NCQ] = { },
  1004. [SCI_STP_DEV_NCQ_ERROR] = {
  1005. .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
  1006. },
  1007. [SCI_STP_DEV_ATAPI_ERROR] = {
  1008. .enter_state = sci_stp_remote_device_atapi_error_substate_enter,
  1009. },
  1010. [SCI_STP_DEV_AWAIT_RESET] = { },
  1011. [SCI_SMP_DEV_IDLE] = {
  1012. .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
  1013. },
  1014. [SCI_SMP_DEV_CMD] = {
  1015. .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
  1016. .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
  1017. },
  1018. [SCI_DEV_STOPPING] = { },
  1019. [SCI_DEV_FAILED] = { },
  1020. [SCI_DEV_RESETTING] = {
  1021. .enter_state = sci_remote_device_resetting_state_enter,
  1022. .exit_state = sci_remote_device_resetting_state_exit
  1023. },
  1024. [SCI_DEV_FINAL] = { },
  1025. };
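/*
 * The table above is consumed by the isci core's sci_init_sm() and
 * sci_change_state(); as the .enter_state/.exit_state members suggest, a
 * transition runs the old state's exit hook (if any), updates the current
 * state, then runs the new state's enter hook. A minimal sketch of that
 * table-driven pattern with hypothetical types (the real ones are
 * struct sci_base_state_machine / struct sci_base_state):
 */
#include <linux/types.h>

struct example_state {
	void (*enter_state)(void *context);
	void (*exit_state)(void *context);
};

struct example_sm {
	const struct example_state *table;
	u32 current_state;
	void *context;
};

static void example_change_state(struct example_sm *sm, u32 new_state)
{
	const struct example_state *prev = &sm->table[sm->current_state];
	const struct example_state *next = &sm->table[new_state];

	if (prev->exit_state)
		prev->exit_state(sm->context);
	sm->current_state = new_state;
	if (next->enter_state)
		next->enter_state(sm->context);
}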
  1026. /**
  1027. * sci_remote_device_construct() - common construction
  1028. * @sci_port: SAS/SATA port through which this device is accessed.
  1029. * @sci_dev: remote device to construct
  1030. *
  1031. * This routine just performs benign initialization and does not
  1032. * allocate the remote_node_context which is left to
  1033. * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
  1034. * frees the remote_node_context(s) for the device.
  1035. */
  1036. static void sci_remote_device_construct(struct isci_port *iport,
  1037. struct isci_remote_device *idev)
  1038. {
  1039. idev->owning_port = iport;
  1040. idev->started_request_count = 0;
  1041. sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
  1042. sci_remote_node_context_construct(&idev->rnc,
  1043. SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
  1044. }
  1045. /**
  1046. * sci_remote_device_da_construct() - construct direct attached device.
  1047. *
  1048. * The information (e.g. IAF, Signature FIS, etc.) necessary to build
  1049. * the device is known to the SCI Core since it is contained in the
  1050. * sci_phy object. Remote node context(s) is/are a global resource
  1051. * allocated by this routine, freed by sci_remote_device_destruct().
  1052. *
  1053. * Returns:
  1054. * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
  1055. * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
  1056. * sata-only controller instance.
  1057. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
  1058. */
  1059. static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
  1060. struct isci_remote_device *idev)
  1061. {
  1062. enum sci_status status;
  1063. struct sci_port_properties properties;
  1064. sci_remote_device_construct(iport, idev);
  1065. sci_port_get_properties(iport, &properties);
  1066. /* Get accurate port width from port's phy mask for a DA device. */
  1067. idev->device_port_width = hweight32(properties.phy_mask);
  1068. status = sci_controller_allocate_remote_node_context(iport->owning_controller,
  1069. idev,
  1070. &idev->rnc.remote_node_index);
  1071. if (status != SCI_SUCCESS)
  1072. return status;
  1073. idev->connection_rate = sci_port_get_max_allowed_speed(iport);
  1074. return SCI_SUCCESS;
  1075. }
  1076. /**
  1077. * sci_remote_device_ea_construct() - construct expander attached device
  1078. *
  1079. * Remote node context(s) is/are a global resource allocated by this
  1080. * routine, freed by sci_remote_device_destruct().
  1081. *
  1082. * Returns:
  1083. * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
  1084. * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
  1085. * sata-only controller instance.
  1086. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
  1087. */
  1088. static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
  1089. struct isci_remote_device *idev)
  1090. {
  1091. struct domain_device *dev = idev->domain_dev;
  1092. enum sci_status status;
  1093. sci_remote_device_construct(iport, idev);
  1094. status = sci_controller_allocate_remote_node_context(iport->owning_controller,
  1095. idev,
  1096. &idev->rnc.remote_node_index);
  1097. if (status != SCI_SUCCESS)
  1098. return status;
  1099. /* For SAS-2 the physical link rate is actually a logical link
  1100. * rate that incorporates multiplexing. The SCU doesn't
  1101. * incorporate multiplexing and for the purposes of the
  1102. * connection the logical link rate is the same as the
  1103. * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
  1104. * one another, so this code works for both situations.
  1105. */
  1106. idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
  1107. dev->linkrate);
  1108. /* @todo Should I assign the port width by reading all of the phys on the port? */
  1109. idev->device_port_width = 1;
  1110. return SCI_SUCCESS;
  1111. }
  1112. enum sci_status sci_remote_device_resume(
  1113. struct isci_remote_device *idev,
  1114. scics_sds_remote_node_context_callback cb_fn,
  1115. void *cb_p)
  1116. {
  1117. enum sci_status status;
  1118. status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
  1119. if (status != SCI_SUCCESS)
  1120. dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
  1121. __func__, status);
  1122. return status;
  1123. }
  1124. static void isci_remote_device_resume_from_abort_complete(void *cbparam)
  1125. {
  1126. struct isci_remote_device *idev = cbparam;
  1127. struct isci_host *ihost = idev->owning_port->owning_controller;
  1128. scics_sds_remote_node_context_callback abort_resume_cb =
  1129. idev->abort_resume_cb;
  1130. dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
  1131. __func__, abort_resume_cb);
  1132. if (abort_resume_cb != NULL) {
  1133. idev->abort_resume_cb = NULL;
  1134. abort_resume_cb(idev->abort_resume_cbparam);
  1135. }
  1136. clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
  1137. wake_up(&ihost->eventq);
  1138. }
  1139. void isci_remote_device_wait_for_resume_from_abort(
  1140. struct isci_host *ihost,
  1141. struct isci_remote_device *idev)
  1142. {
  1143. dev_dbg(scirdev_to_dev(idev), "%s: starting resume wait: %p\n",
  1144. __func__, idev);
  1145. #define MAX_RESUME_MSECS 5
  1146. if (!wait_event_timeout(ihost->eventq,
  1147. (!test_bit(IDEV_ABORT_PATH_RESUME_PENDING,
  1148. &idev->flags)
  1149. || test_bit(IDEV_STOP_PENDING, &idev->flags)),
  1150. msecs_to_jiffies(MAX_RESUME_MSECS))) {
  1151. dev_warn(scirdev_to_dev(idev), "%s: #### Timeout waiting for "
  1152. "resume: %p\n", __func__, idev);
  1153. }
  1154. clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
  1155. dev_dbg(scirdev_to_dev(idev), "%s: resume wait done: %p\n",
  1156. __func__, idev);
  1157. }
  1158. enum sci_status isci_remote_device_resume_from_abort(
  1159. struct isci_host *ihost,
  1160. struct isci_remote_device *idev)
  1161. {
  1162. unsigned long flags;
  1163. enum sci_status status;
  1164. spin_lock_irqsave(&ihost->scic_lock, flags);
  1165. /* Preserve any current resume callbacks, for instance from other
  1166. * resumptions.
  1167. */
  1168. idev->abort_resume_cb = idev->rnc.user_callback;
  1169. idev->abort_resume_cbparam = idev->rnc.user_cookie;
  1170. set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
  1171. clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
  1172. status = sci_remote_device_resume(
  1173. idev, isci_remote_device_resume_from_abort_complete,
  1174. idev);
  1175. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  1176. isci_remote_device_wait_for_resume_from_abort(ihost, idev);
  1177. return status;
  1178. }
  1179. /**
  1180. * sci_remote_device_start() - This method will start the supplied remote
  1181. * device. This method enables normal IO requests to flow through to the
  1182. * remote device.
  1183. * @remote_device: This parameter specifies the device to be started.
  1184. * @timeout: This parameter specifies the number of milliseconds in which the
  1185. * start operation should complete.
  1186. *
  1187. * Returns an indication of whether the device was successfully started:
  1188. * SCI_SUCCESS is returned if the device was successfully started;
  1189. * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start the
  1190. * device when no phys have been added to it.
  1191. */
  1192. static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
  1193. u32 timeout)
  1194. {
  1195. struct sci_base_state_machine *sm = &idev->sm;
  1196. enum sci_remote_device_states state = sm->current_state_id;
  1197. enum sci_status status;
  1198. if (state != SCI_DEV_STOPPED) {
  1199. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  1200. __func__, dev_state_name(state));
  1201. return SCI_FAILURE_INVALID_STATE;
  1202. }
  1203. status = sci_remote_device_resume(idev, remote_device_resume_done,
  1204. idev);
  1205. if (status != SCI_SUCCESS)
  1206. return status;
  1207. sci_change_state(sm, SCI_DEV_STARTING);
  1208. return SCI_SUCCESS;
  1209. }
  1210. static enum sci_status isci_remote_device_construct(struct isci_port *iport,
  1211. struct isci_remote_device *idev)
  1212. {
  1213. struct isci_host *ihost = iport->isci_host;
  1214. struct domain_device *dev = idev->domain_dev;
  1215. enum sci_status status;
  1216. if (dev->parent && dev_is_expander(dev->parent))
  1217. status = sci_remote_device_ea_construct(iport, idev);
  1218. else
  1219. status = sci_remote_device_da_construct(iport, idev);
  1220. if (status != SCI_SUCCESS) {
  1221. dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
  1222. __func__, status);
  1223. return status;
  1224. }
  1225. /* start the device. */
  1226. status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
  1227. if (status != SCI_SUCCESS)
  1228. dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
  1229. status);
  1230. return status;
  1231. }
  1232. /**
  1233. * This function builds the isci_remote_device when a libsas dev_found message
  1234. * is received.
  1235. * @isci_host: This parameter specifies the isci host object.
  1236. * @port: This parameter specifies the isci_port connected to this device.
  1237. *
  1238. * Returns a pointer to the new isci_remote_device, or NULL on failure.
  1239. */
  1240. static struct isci_remote_device *
  1241. isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
  1242. {
  1243. struct isci_remote_device *idev;
  1244. int i;
  1245. for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
  1246. idev = &ihost->devices[i];
  1247. if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
  1248. break;
  1249. }
  1250. if (i >= SCI_MAX_REMOTE_DEVICES) {
  1251. dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
  1252. return NULL;
  1253. }
  1254. if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
  1255. return NULL;
  1256. return idev;
  1257. }
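/*
 * The allocator above claims a slot from the fixed ihost->devices[] pool by
 * atomically setting its IDEV_ALLOCATED bit; isci_remote_device_release()
 * below hands the slot back by clearing that bit (after a barrier) and
 * waking any waiters. The same per-slot-flag scheme in isolation, with
 * hypothetical names:
 */
#include <linux/bitops.h>

#define EXAMPLE_MAX_SLOTS	8
#define EXAMPLE_SLOT_ALLOCATED	0	/* bit number within slot->flags */

struct example_slot {
	unsigned long flags;
	/* ... per-slot payload ... */
};

static struct example_slot example_pool[EXAMPLE_MAX_SLOTS];

static struct example_slot *example_alloc_slot(void)
{
	int i;

	for (i = 0; i < EXAMPLE_MAX_SLOTS; i++) {
		struct example_slot *slot = &example_pool[i];

		/* test_and_set_bit() returns the old value: 0 == just claimed */
		if (!test_and_set_bit(EXAMPLE_SLOT_ALLOCATED, &slot->flags))
			return slot;
	}
	return NULL;	/* pool exhausted */
}

static void example_free_slot(struct example_slot *slot)
{
	/* make prior writes to the slot visible before it can be reused */
	smp_mb__before_clear_bit();
	clear_bit(EXAMPLE_SLOT_ALLOCATED, &slot->flags);
}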
  1258. void isci_remote_device_release(struct kref *kref)
  1259. {
  1260. struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
  1261. struct isci_host *ihost = idev->isci_port->isci_host;
  1262. idev->domain_dev = NULL;
  1263. idev->isci_port = NULL;
  1264. clear_bit(IDEV_START_PENDING, &idev->flags);
  1265. clear_bit(IDEV_STOP_PENDING, &idev->flags);
  1266. clear_bit(IDEV_IO_READY, &idev->flags);
  1267. clear_bit(IDEV_GONE, &idev->flags);
  1268. smp_mb__before_clear_bit();
  1269. clear_bit(IDEV_ALLOCATED, &idev->flags);
  1270. wake_up(&ihost->eventq);
  1271. }
  1272. /**
  1273. * isci_remote_device_stop() - This function is called internally to stop the
  1274. * remote device.
  1275. * @isci_host: This parameter specifies the isci host object.
  1276. * @isci_device: This parameter specifies the remote device.
  1277. *
  1278. * The status of the ihost request to stop.
  1279. */
  1280. enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
  1281. {
  1282. enum sci_status status;
  1283. unsigned long flags;
  1284. dev_dbg(&ihost->pdev->dev,
  1285. "%s: isci_device = %p\n", __func__, idev);
  1286. spin_lock_irqsave(&ihost->scic_lock, flags);
  1287. idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
  1288. set_bit(IDEV_GONE, &idev->flags);
  1289. set_bit(IDEV_STOP_PENDING, &idev->flags);
  1290. status = sci_remote_device_stop(idev, 50);
  1291. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  1292. /* Wait for the stop complete callback. */
  1293. if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
  1294. /* nothing to wait for */;
  1295. else
  1296. wait_for_device_stop(ihost, idev);
  1297. dev_dbg(&ihost->pdev->dev,
  1298. "%s: isci_device = %p, waiting done.\n", __func__, idev);
  1299. return status;
  1300. }
  1301. /**
  1302. * isci_remote_device_gone() - This function is called by libsas when a domain
  1303. * device is removed.
  1304. * @domain_device: This parameter specifies the libsas domain device.
  1305. *
  1306. */
  1307. void isci_remote_device_gone(struct domain_device *dev)
  1308. {
  1309. struct isci_host *ihost = dev_to_ihost(dev);
  1310. struct isci_remote_device *idev = dev->lldd_dev;
  1311. dev_dbg(&ihost->pdev->dev,
  1312. "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
  1313. __func__, dev, idev, idev->isci_port);
  1314. isci_remote_device_stop(ihost, idev);
  1315. }
  1316. /**
  1317. * isci_remote_device_found() - This function is called by libsas when a remote
  1318. * device is discovered. A remote device object is created and started. The
  1319. * function then sleeps until the sci core device-started message is
  1320. * received.
  1321. * @domain_device: This parameter specifies the libsas domain device.
  1322. *
  1323. * status, zero indicates success.
  1324. */
  1325. int isci_remote_device_found(struct domain_device *dev)
  1326. {
  1327. struct isci_host *isci_host = dev_to_ihost(dev);
  1328. struct isci_port *isci_port = dev->port->lldd_port;
  1329. struct isci_remote_device *isci_device;
  1330. enum sci_status status;
  1331. dev_dbg(&isci_host->pdev->dev,
  1332. "%s: domain_device = %p\n", __func__, dev);
  1333. if (!isci_port)
  1334. return -ENODEV;
  1335. isci_device = isci_remote_device_alloc(isci_host, isci_port);
  1336. if (!isci_device)
  1337. return -ENODEV;
  1338. kref_init(&isci_device->kref);
  1339. INIT_LIST_HEAD(&isci_device->node);
  1340. spin_lock_irq(&isci_host->scic_lock);
  1341. isci_device->domain_dev = dev;
  1342. isci_device->isci_port = isci_port;
  1343. list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
  1344. set_bit(IDEV_START_PENDING, &isci_device->flags);
  1345. status = isci_remote_device_construct(isci_port, isci_device);
  1346. dev_dbg(&isci_host->pdev->dev,
  1347. "%s: isci_device = %p\n",
  1348. __func__, isci_device);
  1349. if (status == SCI_SUCCESS) {
  1350. /* device came up, advertise it to the world */
  1351. dev->lldd_dev = isci_device;
  1352. } else
  1353. isci_put_device(isci_device);
  1354. spin_unlock_irq(&isci_host->scic_lock);
  1355. /* wait for the device ready callback. */
  1356. wait_for_device_start(isci_host, isci_device);
  1357. return status == SCI_SUCCESS ? 0 : -ENODEV;
  1358. }
  1359. enum sci_status isci_remote_device_suspend_terminate(
  1360. struct isci_host *ihost,
  1361. struct isci_remote_device *idev,
  1362. struct isci_request *ireq)
  1363. {
  1364. unsigned long flags;
  1365. enum sci_status status;
  1366. /* Put the device into suspension. */
  1367. spin_lock_irqsave(&ihost->scic_lock, flags);
  1368. set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
  1369. sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
  1370. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  1371. /* Terminate and wait for the completions. */
  1372. status = isci_remote_device_terminate_requests(ihost, idev, ireq);
  1373. if (status != SCI_SUCCESS)
  1374. dev_dbg(&ihost->pdev->dev,
  1375. "%s: isci_remote_device_terminate_requests(%p) "
  1376. "returned %d!\n",
  1377. __func__, idev, status);
  1378. /* NOTE: RNC resumption is left to the caller! */
  1379. return status;
  1380. }
  1381. int isci_remote_device_is_safe_to_abort(
  1382. struct isci_remote_device *idev)
  1383. {
  1384. return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
  1385. }
  1386. enum sci_status sci_remote_device_abort_requests_pending_abort(
  1387. struct isci_remote_device *idev)
  1388. {
  1389. return sci_remote_device_terminate_reqs_checkabort(idev, 1);
  1390. }
  1391. enum sci_status isci_remote_device_reset_complete(
  1392. struct isci_host *ihost,
  1393. struct isci_remote_device *idev)
  1394. {
  1395. unsigned long flags;
  1396. enum sci_status status;
  1397. spin_lock_irqsave(&ihost->scic_lock, flags);
  1398. status = sci_remote_device_reset_complete(idev);
  1399. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  1400. return status;
  1401. }
  1402. void isci_dev_set_hang_detection_timeout(
  1403. struct isci_remote_device *idev,
  1404. u32 timeout)
  1405. {
  1406. if (dev_is_sata(idev->domain_dev)) {
  1407. if (timeout) {
  1408. if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
  1409. &idev->flags))
  1410. return; /* Already enabled. */
  1411. } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
  1412. &idev->flags))
  1413. return; /* Not enabled. */
  1414. sci_port_set_hang_detection_timeout(idev->owning_port,
  1415. timeout);
  1416. }
  1417. }
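/*
 * Rough usage sketch (not code from this driver): the suspend/terminate/
 * resume helpers above are intended to be used as a sequence from the
 * error-handling paths elsewhere in the driver -- suspend and terminate
 * first, then, per the "RNC resumption is left to the caller" note in
 * isci_remote_device_suspend_terminate(), resume the device once the
 * caller's own cleanup is done. Hypothetical caller:
 */
static void example_abort_device(struct isci_host *ihost,
				 struct isci_remote_device *idev)
{
	enum sci_status status;

	/* Quiesce the device and wait for outstanding requests to terminate. */
	status = isci_remote_device_suspend_terminate(ihost, idev, NULL);
	if (status != SCI_SUCCESS)
		dev_dbg(&ihost->pdev->dev, "%s: terminate failed: %d\n",
			__func__, status);

	/* ... caller-specific cleanup (e.g. libsas task completion) ... */

	/* Resume the remote node context now that the abort path is done. */
	isci_remote_device_resume_from_abort(ihost, idev);
}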