remote_device.c 51 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689
  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * BSD LICENSE
  25. *
  26. * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27. * All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions
  31. * are met:
  32. *
  33. * * Redistributions of source code must retain the above copyright
  34. * notice, this list of conditions and the following disclaimer.
  35. * * Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions and the following disclaimer in
  37. * the documentation and/or other materials provided with the
  38. * distribution.
  39. * * Neither the name of Intel Corporation nor the names of its
  40. * contributors may be used to endorse or promote products derived
  41. * from this software without specific prior written permission.
  42. *
  43. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54. */
  55. #include <scsi/sas.h>
  56. #include <linux/bitops.h>
  57. #include "isci.h"
  58. #include "port.h"
  59. #include "remote_device.h"
  60. #include "request.h"
  61. #include "remote_node_context.h"
  62. #include "scu_event_codes.h"
  63. #include "task.h"
  64. #undef C
  65. #define C(a) (#a)
  66. const char *dev_state_name(enum sci_remote_device_states state)
  67. {
  68. static const char * const strings[] = REMOTE_DEV_STATES;
  69. return strings[state];
  70. }
  71. #undef C
  72. static enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
  73. enum sci_remote_node_suspension_reasons reason)
  74. {
  75. return sci_remote_node_context_suspend(&idev->rnc, reason,
  76. SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
  77. }
  78. /**
  79. * isci_remote_device_ready() - This function is called by the ihost when the
  80. * remote device is ready. We mark the isci device as ready and signal the
  81. * waiting proccess.
  82. * @ihost: our valid isci_host
  83. * @idev: remote device
  84. *
  85. */
  86. static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
  87. {
  88. dev_dbg(&ihost->pdev->dev,
  89. "%s: idev = %p\n", __func__, idev);
  90. clear_bit(IDEV_IO_NCQERROR, &idev->flags);
  91. set_bit(IDEV_IO_READY, &idev->flags);
  92. if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
  93. wake_up(&ihost->eventq);
  94. }
  95. static enum sci_status sci_remote_device_terminate_req(
  96. struct isci_host *ihost,
  97. struct isci_remote_device *idev,
  98. int check_abort,
  99. struct isci_request *ireq)
  100. {
  101. if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
  102. (ireq->target_device != idev) ||
  103. (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
  104. return SCI_SUCCESS;
  105. dev_dbg(&ihost->pdev->dev,
  106. "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
  107. __func__, idev, idev->flags, ireq, ireq->target_device);
  108. set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
  109. return sci_controller_terminate_request(ihost, idev, ireq);
  110. }
  111. static enum sci_status sci_remote_device_terminate_reqs_checkabort(
  112. struct isci_remote_device *idev,
  113. int chk)
  114. {
  115. struct isci_host *ihost = idev->owning_port->owning_controller;
  116. enum sci_status status = SCI_SUCCESS;
  117. u32 i;
  118. for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
  119. struct isci_request *ireq = ihost->reqs[i];
  120. enum sci_status s;
  121. s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
  122. if (s != SCI_SUCCESS)
  123. status = s;
  124. }
  125. return status;
  126. }
  127. static bool isci_compare_suspendcount(
  128. struct isci_remote_device *idev,
  129. u32 localcount)
  130. {
  131. smp_rmb();
  132. return localcount != idev->rnc.suspend_count;
  133. }
  134. static bool isci_check_reqterm(
  135. struct isci_host *ihost,
  136. struct isci_remote_device *idev,
  137. struct isci_request *ireq,
  138. u32 localcount)
  139. {
  140. unsigned long flags;
  141. bool res;
  142. spin_lock_irqsave(&ihost->scic_lock, flags);
  143. res = isci_compare_suspendcount(idev, localcount)
  144. && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
  145. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  146. return res;
  147. }
  148. static bool isci_check_devempty(
  149. struct isci_host *ihost,
  150. struct isci_remote_device *idev,
  151. u32 localcount)
  152. {
  153. unsigned long flags;
  154. bool res;
  155. spin_lock_irqsave(&ihost->scic_lock, flags);
  156. res = isci_compare_suspendcount(idev, localcount)
  157. && idev->started_request_count == 0;
  158. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  159. return res;
  160. }
  161. enum sci_status isci_remote_device_terminate_requests(
  162. struct isci_host *ihost,
  163. struct isci_remote_device *idev,
  164. struct isci_request *ireq)
  165. {
  166. enum sci_status status = SCI_SUCCESS;
  167. unsigned long flags;
  168. u32 rnc_suspend_count;
  169. spin_lock_irqsave(&ihost->scic_lock, flags);
  170. if (isci_get_device(idev) == NULL) {
  171. dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
  172. __func__, idev);
  173. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  174. status = SCI_FAILURE;
  175. } else {
  176. /* If already suspended, don't wait for another suspension. */
  177. smp_rmb();
  178. rnc_suspend_count
  179. = sci_remote_node_context_is_suspended(&idev->rnc)
  180. ? 0 : idev->rnc.suspend_count;
  181. dev_dbg(&ihost->pdev->dev,
  182. "%s: idev=%p, ireq=%p; started_request_count=%d, "
  183. "rnc_suspend_count=%d, rnc.suspend_count=%d"
  184. "about to wait\n",
  185. __func__, idev, ireq, idev->started_request_count,
  186. rnc_suspend_count, idev->rnc.suspend_count);
  187. #define MAX_SUSPEND_MSECS 10000
  188. if (ireq) {
  189. /* Terminate a specific TC. */
  190. set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
  191. sci_remote_device_terminate_req(ihost, idev, 0, ireq);
  192. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  193. if (!wait_event_timeout(ihost->eventq,
  194. isci_check_reqterm(ihost, idev, ireq,
  195. rnc_suspend_count),
  196. msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
  197. dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
  198. __func__, ihost->id);
  199. dev_dbg(&ihost->pdev->dev,
  200. "%s: ******* Timeout waiting for "
  201. "suspend; idev=%p, current state %s; "
  202. "started_request_count=%d, flags=%lx\n\t"
  203. "rnc_suspend_count=%d, rnc.suspend_count=%d "
  204. "RNC: current state %s, current "
  205. "suspend_type %x dest state %d;\n"
  206. "ireq=%p, ireq->flags = %lx\n",
  207. __func__, idev,
  208. dev_state_name(idev->sm.current_state_id),
  209. idev->started_request_count, idev->flags,
  210. rnc_suspend_count, idev->rnc.suspend_count,
  211. rnc_state_name(idev->rnc.sm.current_state_id),
  212. idev->rnc.suspend_type,
  213. idev->rnc.destination_state,
  214. ireq, ireq->flags);
  215. }
  216. clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
  217. isci_free_tag(ihost, ireq->io_tag);
  218. } else {
  219. /* Terminate all TCs. */
  220. sci_remote_device_terminate_requests(idev);
  221. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  222. if (!wait_event_timeout(ihost->eventq,
  223. isci_check_devempty(ihost, idev,
  224. rnc_suspend_count),
  225. msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
  226. dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
  227. __func__, ihost->id);
  228. dev_dbg(&ihost->pdev->dev,
  229. "%s: ******* Timeout waiting for "
  230. "suspend; idev=%p, current state %s; "
  231. "started_request_count=%d, flags=%lx\n\t"
  232. "rnc_suspend_count=%d, "
  233. "RNC: current state %s, "
  234. "rnc.suspend_count=%d, current "
  235. "suspend_type %x dest state %d\n",
  236. __func__, idev,
  237. dev_state_name(idev->sm.current_state_id),
  238. idev->started_request_count, idev->flags,
  239. rnc_suspend_count,
  240. rnc_state_name(idev->rnc.sm.current_state_id),
  241. idev->rnc.suspend_count,
  242. idev->rnc.suspend_type,
  243. idev->rnc.destination_state);
  244. }
  245. }
  246. dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
  247. __func__, idev);
  248. isci_put_device(idev);
  249. }
  250. return status;
  251. }
  252. /**
  253. * isci_remote_device_not_ready() - This function is called by the ihost when
  254. * the remote device is not ready. We mark the isci device as ready (not
  255. * "ready_for_io") and signal the waiting proccess.
  256. * @isci_host: This parameter specifies the isci host object.
  257. * @isci_device: This parameter specifies the remote device
  258. *
  259. * sci_lock is held on entrance to this function.
  260. */
  261. static void isci_remote_device_not_ready(struct isci_host *ihost,
  262. struct isci_remote_device *idev,
  263. u32 reason)
  264. {
  265. dev_dbg(&ihost->pdev->dev,
  266. "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
  267. switch (reason) {
  268. case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
  269. set_bit(IDEV_IO_NCQERROR, &idev->flags);
  270. /* Suspend the remote device so the I/O can be terminated. */
  271. sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
  272. /* Kill all outstanding requests for the device. */
  273. sci_remote_device_terminate_requests(idev);
  274. /* Fall through into the default case... */
  275. default:
  276. clear_bit(IDEV_IO_READY, &idev->flags);
  277. break;
  278. }
  279. }
  280. /* called once the remote node context is ready to be freed.
  281. * The remote device can now report that its stop operation is complete. none
  282. */
  283. static void rnc_destruct_done(void *_dev)
  284. {
  285. struct isci_remote_device *idev = _dev;
  286. BUG_ON(idev->started_request_count != 0);
  287. sci_change_state(&idev->sm, SCI_DEV_STOPPED);
  288. }
  289. enum sci_status sci_remote_device_terminate_requests(
  290. struct isci_remote_device *idev)
  291. {
  292. return sci_remote_device_terminate_reqs_checkabort(idev, 0);
  293. }
  294. enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
  295. u32 timeout)
  296. {
  297. struct sci_base_state_machine *sm = &idev->sm;
  298. enum sci_remote_device_states state = sm->current_state_id;
  299. switch (state) {
  300. case SCI_DEV_INITIAL:
  301. case SCI_DEV_FAILED:
  302. case SCI_DEV_FINAL:
  303. default:
  304. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  305. __func__, dev_state_name(state));
  306. return SCI_FAILURE_INVALID_STATE;
  307. case SCI_DEV_STOPPED:
  308. return SCI_SUCCESS;
  309. case SCI_DEV_STARTING:
  310. /* device not started so there had better be no requests */
  311. BUG_ON(idev->started_request_count != 0);
  312. sci_remote_node_context_destruct(&idev->rnc,
  313. rnc_destruct_done, idev);
  314. /* Transition to the stopping state and wait for the
  315. * remote node to complete being posted and invalidated.
  316. */
  317. sci_change_state(sm, SCI_DEV_STOPPING);
  318. return SCI_SUCCESS;
  319. case SCI_DEV_READY:
  320. case SCI_STP_DEV_IDLE:
  321. case SCI_STP_DEV_CMD:
  322. case SCI_STP_DEV_NCQ:
  323. case SCI_STP_DEV_NCQ_ERROR:
  324. case SCI_STP_DEV_AWAIT_RESET:
  325. case SCI_SMP_DEV_IDLE:
  326. case SCI_SMP_DEV_CMD:
  327. sci_change_state(sm, SCI_DEV_STOPPING);
  328. if (idev->started_request_count == 0)
  329. sci_remote_node_context_destruct(&idev->rnc,
  330. rnc_destruct_done,
  331. idev);
  332. else {
  333. sci_remote_device_suspend(
  334. idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
  335. sci_remote_device_terminate_requests(idev);
  336. }
  337. return SCI_SUCCESS;
  338. case SCI_DEV_STOPPING:
  339. /* All requests should have been terminated, but if there is an
  340. * attempt to stop a device already in the stopping state, then
  341. * try again to terminate.
  342. */
  343. return sci_remote_device_terminate_requests(idev);
  344. case SCI_DEV_RESETTING:
  345. sci_change_state(sm, SCI_DEV_STOPPING);
  346. return SCI_SUCCESS;
  347. }
  348. }
  349. enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
  350. {
  351. struct sci_base_state_machine *sm = &idev->sm;
  352. enum sci_remote_device_states state = sm->current_state_id;
  353. switch (state) {
  354. case SCI_DEV_INITIAL:
  355. case SCI_DEV_STOPPED:
  356. case SCI_DEV_STARTING:
  357. case SCI_SMP_DEV_IDLE:
  358. case SCI_SMP_DEV_CMD:
  359. case SCI_DEV_STOPPING:
  360. case SCI_DEV_FAILED:
  361. case SCI_DEV_RESETTING:
  362. case SCI_DEV_FINAL:
  363. default:
  364. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  365. __func__, dev_state_name(state));
  366. return SCI_FAILURE_INVALID_STATE;
  367. case SCI_DEV_READY:
  368. case SCI_STP_DEV_IDLE:
  369. case SCI_STP_DEV_CMD:
  370. case SCI_STP_DEV_NCQ:
  371. case SCI_STP_DEV_NCQ_ERROR:
  372. case SCI_STP_DEV_AWAIT_RESET:
  373. sci_change_state(sm, SCI_DEV_RESETTING);
  374. return SCI_SUCCESS;
  375. }
  376. }
  377. enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
  378. {
  379. struct sci_base_state_machine *sm = &idev->sm;
  380. enum sci_remote_device_states state = sm->current_state_id;
  381. if (state != SCI_DEV_RESETTING) {
  382. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  383. __func__, dev_state_name(state));
  384. return SCI_FAILURE_INVALID_STATE;
  385. }
  386. sci_change_state(sm, SCI_DEV_READY);
  387. return SCI_SUCCESS;
  388. }
  389. enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
  390. u32 frame_index)
  391. {
  392. struct sci_base_state_machine *sm = &idev->sm;
  393. enum sci_remote_device_states state = sm->current_state_id;
  394. struct isci_host *ihost = idev->owning_port->owning_controller;
  395. enum sci_status status;
  396. switch (state) {
  397. case SCI_DEV_INITIAL:
  398. case SCI_DEV_STOPPED:
  399. case SCI_DEV_STARTING:
  400. case SCI_STP_DEV_IDLE:
  401. case SCI_SMP_DEV_IDLE:
  402. case SCI_DEV_FINAL:
  403. default:
  404. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  405. __func__, dev_state_name(state));
  406. /* Return the frame back to the controller */
  407. sci_controller_release_frame(ihost, frame_index);
  408. return SCI_FAILURE_INVALID_STATE;
  409. case SCI_DEV_READY:
  410. case SCI_STP_DEV_NCQ_ERROR:
  411. case SCI_STP_DEV_AWAIT_RESET:
  412. case SCI_DEV_STOPPING:
  413. case SCI_DEV_FAILED:
  414. case SCI_DEV_RESETTING: {
  415. struct isci_request *ireq;
  416. struct ssp_frame_hdr hdr;
  417. void *frame_header;
  418. ssize_t word_cnt;
  419. status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
  420. frame_index,
  421. &frame_header);
  422. if (status != SCI_SUCCESS)
  423. return status;
  424. word_cnt = sizeof(hdr) / sizeof(u32);
  425. sci_swab32_cpy(&hdr, frame_header, word_cnt);
  426. ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
  427. if (ireq && ireq->target_device == idev) {
  428. /* The IO request is now in charge of releasing the frame */
  429. status = sci_io_request_frame_handler(ireq, frame_index);
  430. } else {
  431. /* We could not map this tag to a valid IO
  432. * request Just toss the frame and continue
  433. */
  434. sci_controller_release_frame(ihost, frame_index);
  435. }
  436. break;
  437. }
  438. case SCI_STP_DEV_NCQ: {
  439. struct dev_to_host_fis *hdr;
  440. status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
  441. frame_index,
  442. (void **)&hdr);
  443. if (status != SCI_SUCCESS)
  444. return status;
  445. if (hdr->fis_type == FIS_SETDEVBITS &&
  446. (hdr->status & ATA_ERR)) {
  447. idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
  448. /* TODO Check sactive and complete associated IO if any. */
  449. sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
  450. } else if (hdr->fis_type == FIS_REGD2H &&
  451. (hdr->status & ATA_ERR)) {
  452. /*
  453. * Some devices return D2H FIS when an NCQ error is detected.
  454. * Treat this like an SDB error FIS ready reason.
  455. */
  456. idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
  457. sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
  458. } else
  459. status = SCI_FAILURE;
  460. sci_controller_release_frame(ihost, frame_index);
  461. break;
  462. }
  463. case SCI_STP_DEV_CMD:
  464. case SCI_SMP_DEV_CMD:
  465. /* The device does not process any UF received from the hardware while
  466. * in this state. All unsolicited frames are forwarded to the io request
  467. * object.
  468. */
  469. status = sci_io_request_frame_handler(idev->working_request, frame_index);
  470. break;
  471. }
  472. return status;
  473. }
  474. static bool is_remote_device_ready(struct isci_remote_device *idev)
  475. {
  476. struct sci_base_state_machine *sm = &idev->sm;
  477. enum sci_remote_device_states state = sm->current_state_id;
  478. switch (state) {
  479. case SCI_DEV_READY:
  480. case SCI_STP_DEV_IDLE:
  481. case SCI_STP_DEV_CMD:
  482. case SCI_STP_DEV_NCQ:
  483. case SCI_STP_DEV_NCQ_ERROR:
  484. case SCI_STP_DEV_AWAIT_RESET:
  485. case SCI_SMP_DEV_IDLE:
  486. case SCI_SMP_DEV_CMD:
  487. return true;
  488. default:
  489. return false;
  490. }
  491. }
  492. /*
  493. * called once the remote node context has transisitioned to a ready
  494. * state (after suspending RX and/or TX due to early D2H fis)
  495. */
  496. static void atapi_remote_device_resume_done(void *_dev)
  497. {
  498. struct isci_remote_device *idev = _dev;
  499. struct isci_request *ireq = idev->working_request;
  500. sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
  501. }
  502. enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
  503. u32 event_code)
  504. {
  505. enum sci_status status;
  506. switch (scu_get_event_type(event_code)) {
  507. case SCU_EVENT_TYPE_RNC_OPS_MISC:
  508. case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
  509. case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
  510. status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
  511. break;
  512. case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
  513. if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
  514. status = SCI_SUCCESS;
  515. /* Suspend the associated RNC */
  516. sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
  517. dev_dbg(scirdev_to_dev(idev),
  518. "%s: device: %p event code: %x: %s\n",
  519. __func__, idev, event_code,
  520. is_remote_device_ready(idev)
  521. ? "I_T_Nexus_Timeout event"
  522. : "I_T_Nexus_Timeout event in wrong state");
  523. break;
  524. }
  525. /* Else, fall through and treat as unhandled... */
  526. default:
  527. dev_dbg(scirdev_to_dev(idev),
  528. "%s: device: %p event code: %x: %s\n",
  529. __func__, idev, event_code,
  530. is_remote_device_ready(idev)
  531. ? "unexpected event"
  532. : "unexpected event in wrong state");
  533. status = SCI_FAILURE_INVALID_STATE;
  534. break;
  535. }
  536. if (status != SCI_SUCCESS)
  537. return status;
  538. return status;
  539. }
  540. static void sci_remote_device_start_request(struct isci_remote_device *idev,
  541. struct isci_request *ireq,
  542. enum sci_status status)
  543. {
  544. struct isci_port *iport = idev->owning_port;
  545. /* cleanup requests that failed after starting on the port */
  546. if (status != SCI_SUCCESS)
  547. sci_port_complete_io(iport, idev, ireq);
  548. else {
  549. kref_get(&idev->kref);
  550. idev->started_request_count++;
  551. }
  552. }
  553. enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
  554. struct isci_remote_device *idev,
  555. struct isci_request *ireq)
  556. {
  557. struct sci_base_state_machine *sm = &idev->sm;
  558. enum sci_remote_device_states state = sm->current_state_id;
  559. struct isci_port *iport = idev->owning_port;
  560. enum sci_status status;
  561. switch (state) {
  562. case SCI_DEV_INITIAL:
  563. case SCI_DEV_STOPPED:
  564. case SCI_DEV_STARTING:
  565. case SCI_STP_DEV_NCQ_ERROR:
  566. case SCI_DEV_STOPPING:
  567. case SCI_DEV_FAILED:
  568. case SCI_DEV_RESETTING:
  569. case SCI_DEV_FINAL:
  570. default:
  571. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  572. __func__, dev_state_name(state));
  573. return SCI_FAILURE_INVALID_STATE;
  574. case SCI_DEV_READY:
  575. /* attempt to start an io request for this device object. The remote
  576. * device object will issue the start request for the io and if
  577. * successful it will start the request for the port object then
  578. * increment its own request count.
  579. */
  580. status = sci_port_start_io(iport, idev, ireq);
  581. if (status != SCI_SUCCESS)
  582. return status;
  583. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  584. if (status != SCI_SUCCESS)
  585. break;
  586. status = sci_request_start(ireq);
  587. break;
  588. case SCI_STP_DEV_IDLE: {
  589. /* handle the start io operation for a sata device that is in
  590. * the command idle state. - Evalute the type of IO request to
  591. * be started - If its an NCQ request change to NCQ substate -
  592. * If its any other command change to the CMD substate
  593. *
  594. * If this is a softreset we may want to have a different
  595. * substate.
  596. */
  597. enum sci_remote_device_states new_state;
  598. struct sas_task *task = isci_request_access_task(ireq);
  599. status = sci_port_start_io(iport, idev, ireq);
  600. if (status != SCI_SUCCESS)
  601. return status;
  602. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  603. if (status != SCI_SUCCESS)
  604. break;
  605. status = sci_request_start(ireq);
  606. if (status != SCI_SUCCESS)
  607. break;
  608. if (task->ata_task.use_ncq)
  609. new_state = SCI_STP_DEV_NCQ;
  610. else {
  611. idev->working_request = ireq;
  612. new_state = SCI_STP_DEV_CMD;
  613. }
  614. sci_change_state(sm, new_state);
  615. break;
  616. }
  617. case SCI_STP_DEV_NCQ: {
  618. struct sas_task *task = isci_request_access_task(ireq);
  619. if (task->ata_task.use_ncq) {
  620. status = sci_port_start_io(iport, idev, ireq);
  621. if (status != SCI_SUCCESS)
  622. return status;
  623. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  624. if (status != SCI_SUCCESS)
  625. break;
  626. status = sci_request_start(ireq);
  627. } else
  628. return SCI_FAILURE_INVALID_STATE;
  629. break;
  630. }
  631. case SCI_STP_DEV_AWAIT_RESET:
  632. return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
  633. case SCI_SMP_DEV_IDLE:
  634. status = sci_port_start_io(iport, idev, ireq);
  635. if (status != SCI_SUCCESS)
  636. return status;
  637. status = sci_remote_node_context_start_io(&idev->rnc, ireq);
  638. if (status != SCI_SUCCESS)
  639. break;
  640. status = sci_request_start(ireq);
  641. if (status != SCI_SUCCESS)
  642. break;
  643. idev->working_request = ireq;
  644. sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
  645. break;
  646. case SCI_STP_DEV_CMD:
  647. case SCI_SMP_DEV_CMD:
  648. /* device is already handling a command it can not accept new commands
  649. * until this one is complete.
  650. */
  651. return SCI_FAILURE_INVALID_STATE;
  652. }
  653. sci_remote_device_start_request(idev, ireq, status);
  654. return status;
  655. }
  656. static enum sci_status common_complete_io(struct isci_port *iport,
  657. struct isci_remote_device *idev,
  658. struct isci_request *ireq)
  659. {
  660. enum sci_status status;
  661. status = sci_request_complete(ireq);
  662. if (status != SCI_SUCCESS)
  663. return status;
  664. status = sci_port_complete_io(iport, idev, ireq);
  665. if (status != SCI_SUCCESS)
  666. return status;
  667. sci_remote_device_decrement_request_count(idev);
  668. return status;
  669. }
  670. enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
  671. struct isci_remote_device *idev,
  672. struct isci_request *ireq)
  673. {
  674. struct sci_base_state_machine *sm = &idev->sm;
  675. enum sci_remote_device_states state = sm->current_state_id;
  676. struct isci_port *iport = idev->owning_port;
  677. enum sci_status status;
  678. switch (state) {
  679. case SCI_DEV_INITIAL:
  680. case SCI_DEV_STOPPED:
  681. case SCI_DEV_STARTING:
  682. case SCI_STP_DEV_IDLE:
  683. case SCI_SMP_DEV_IDLE:
  684. case SCI_DEV_FAILED:
  685. case SCI_DEV_FINAL:
  686. default:
  687. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  688. __func__, dev_state_name(state));
  689. return SCI_FAILURE_INVALID_STATE;
  690. case SCI_DEV_READY:
  691. case SCI_STP_DEV_AWAIT_RESET:
  692. case SCI_DEV_RESETTING:
  693. status = common_complete_io(iport, idev, ireq);
  694. break;
  695. case SCI_STP_DEV_CMD:
  696. case SCI_STP_DEV_NCQ:
  697. case SCI_STP_DEV_NCQ_ERROR:
  698. case SCI_STP_DEV_ATAPI_ERROR:
  699. status = common_complete_io(iport, idev, ireq);
  700. if (status != SCI_SUCCESS)
  701. break;
  702. if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
  703. /* This request causes hardware error, device needs to be Lun Reset.
  704. * So here we force the state machine to IDLE state so the rest IOs
  705. * can reach RNC state handler, these IOs will be completed by RNC with
  706. * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
  707. */
  708. sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
  709. } else if (idev->started_request_count == 0)
  710. sci_change_state(sm, SCI_STP_DEV_IDLE);
  711. break;
  712. case SCI_SMP_DEV_CMD:
  713. status = common_complete_io(iport, idev, ireq);
  714. if (status != SCI_SUCCESS)
  715. break;
  716. sci_change_state(sm, SCI_SMP_DEV_IDLE);
  717. break;
  718. case SCI_DEV_STOPPING:
  719. status = common_complete_io(iport, idev, ireq);
  720. if (status != SCI_SUCCESS)
  721. break;
  722. if (idev->started_request_count == 0)
  723. sci_remote_node_context_destruct(&idev->rnc,
  724. rnc_destruct_done,
  725. idev);
  726. break;
  727. }
  728. if (status != SCI_SUCCESS)
  729. dev_err(scirdev_to_dev(idev),
  730. "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
  731. "could not complete\n", __func__, iport,
  732. idev, ireq, status);
  733. else
  734. isci_put_device(idev);
  735. return status;
  736. }
  737. static void sci_remote_device_continue_request(void *dev)
  738. {
  739. struct isci_remote_device *idev = dev;
  740. /* we need to check if this request is still valid to continue. */
  741. if (idev->working_request)
  742. sci_controller_continue_io(idev->working_request);
  743. }
  744. enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
  745. struct isci_remote_device *idev,
  746. struct isci_request *ireq)
  747. {
  748. struct sci_base_state_machine *sm = &idev->sm;
  749. enum sci_remote_device_states state = sm->current_state_id;
  750. struct isci_port *iport = idev->owning_port;
  751. enum sci_status status;
  752. switch (state) {
  753. case SCI_DEV_INITIAL:
  754. case SCI_DEV_STOPPED:
  755. case SCI_DEV_STARTING:
  756. case SCI_SMP_DEV_IDLE:
  757. case SCI_SMP_DEV_CMD:
  758. case SCI_DEV_STOPPING:
  759. case SCI_DEV_FAILED:
  760. case SCI_DEV_RESETTING:
  761. case SCI_DEV_FINAL:
  762. default:
  763. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  764. __func__, dev_state_name(state));
  765. return SCI_FAILURE_INVALID_STATE;
  766. case SCI_STP_DEV_IDLE:
  767. case SCI_STP_DEV_CMD:
  768. case SCI_STP_DEV_NCQ:
  769. case SCI_STP_DEV_NCQ_ERROR:
  770. case SCI_STP_DEV_AWAIT_RESET:
  771. status = sci_port_start_io(iport, idev, ireq);
  772. if (status != SCI_SUCCESS)
  773. return status;
  774. status = sci_request_start(ireq);
  775. if (status != SCI_SUCCESS)
  776. goto out;
  777. /* Note: If the remote device state is not IDLE this will
  778. * replace the request that probably resulted in the task
  779. * management request.
  780. */
  781. idev->working_request = ireq;
  782. sci_change_state(sm, SCI_STP_DEV_CMD);
  783. /* The remote node context must cleanup the TCi to NCQ mapping
  784. * table. The only way to do this correctly is to either write
  785. * to the TLCR register or to invalidate and repost the RNC. In
  786. * either case the remote node context state machine will take
  787. * the correct action when the remote node context is suspended
  788. * and later resumed.
  789. */
  790. sci_remote_device_suspend(idev,
  791. SCI_SW_SUSPEND_LINKHANG_DETECT);
  792. status = sci_remote_node_context_start_task(&idev->rnc, ireq,
  793. sci_remote_device_continue_request, idev);
  794. out:
  795. sci_remote_device_start_request(idev, ireq, status);
  796. /* We need to let the controller start request handler know that
  797. * it can't post TC yet. We will provide a callback function to
  798. * post TC when RNC gets resumed.
  799. */
  800. return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
  801. case SCI_DEV_READY:
  802. status = sci_port_start_io(iport, idev, ireq);
  803. if (status != SCI_SUCCESS)
  804. return status;
  805. /* Resume the RNC as needed: */
  806. status = sci_remote_node_context_start_task(&idev->rnc, ireq,
  807. NULL, NULL);
  808. if (status != SCI_SUCCESS)
  809. break;
  810. status = sci_request_start(ireq);
  811. break;
  812. }
  813. sci_remote_device_start_request(idev, ireq, status);
  814. return status;
  815. }
  816. void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
  817. {
  818. struct isci_port *iport = idev->owning_port;
  819. u32 context;
  820. context = request |
  821. (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  822. (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
  823. idev->rnc.remote_node_index;
  824. sci_controller_post_request(iport->owning_controller, context);
  825. }
  826. /* called once the remote node context has transisitioned to a
  827. * ready state. This is the indication that the remote device object can also
  828. * transition to ready.
  829. */
  830. static void remote_device_resume_done(void *_dev)
  831. {
  832. struct isci_remote_device *idev = _dev;
  833. if (is_remote_device_ready(idev))
  834. return;
  835. /* go 'ready' if we are not already in a ready state */
  836. sci_change_state(&idev->sm, SCI_DEV_READY);
  837. }
  838. static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
  839. {
  840. struct isci_remote_device *idev = _dev;
  841. struct isci_host *ihost = idev->owning_port->owning_controller;
  842. /* For NCQ operation we do not issue a isci_remote_device_not_ready().
  843. * As a result, avoid sending the ready notification.
  844. */
  845. if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
  846. isci_remote_device_ready(ihost, idev);
  847. }
  848. static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
  849. {
  850. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  851. /* Initial state is a transitional state to the stopped state */
  852. sci_change_state(&idev->sm, SCI_DEV_STOPPED);
  853. }
  854. /**
  855. * sci_remote_device_destruct() - free remote node context and destruct
  856. * @remote_device: This parameter specifies the remote device to be destructed.
  857. *
  858. * Remote device objects are a limited resource. As such, they must be
  859. * protected. Thus calls to construct and destruct are mutually exclusive and
  860. * non-reentrant. The return value shall indicate if the device was
  861. * successfully destructed or if some failure occurred. enum sci_status This value
  862. * is returned if the device is successfully destructed.
  863. * SCI_FAILURE_INVALID_REMOTE_DEVICE This value is returned if the supplied
  864. * device isn't valid (e.g. it's already been destoryed, the handle isn't
  865. * valid, etc.).
  866. */
  867. static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
  868. {
  869. struct sci_base_state_machine *sm = &idev->sm;
  870. enum sci_remote_device_states state = sm->current_state_id;
  871. struct isci_host *ihost;
  872. if (state != SCI_DEV_STOPPED) {
  873. dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
  874. __func__, dev_state_name(state));
  875. return SCI_FAILURE_INVALID_STATE;
  876. }
  877. ihost = idev->owning_port->owning_controller;
  878. sci_controller_free_remote_node_context(ihost, idev,
  879. idev->rnc.remote_node_index);
  880. idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
  881. sci_change_state(sm, SCI_DEV_FINAL);
  882. return SCI_SUCCESS;
  883. }
  884. /**
  885. * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
  886. * @ihost: This parameter specifies the isci host object.
  887. * @idev: This parameter specifies the remote device to be freed.
  888. *
  889. */
  890. static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
  891. {
  892. dev_dbg(&ihost->pdev->dev,
  893. "%s: isci_device = %p\n", __func__, idev);
  894. /* There should not be any outstanding io's. All paths to
  895. * here should go through isci_remote_device_nuke_requests.
  896. * If we hit this condition, we will need a way to complete
  897. * io requests in process */
  898. BUG_ON(idev->started_request_count > 0);
  899. sci_remote_device_destruct(idev);
  900. list_del_init(&idev->node);
  901. isci_put_device(idev);
  902. }
  903. static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
  904. {
  905. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  906. struct isci_host *ihost = idev->owning_port->owning_controller;
  907. u32 prev_state;
  908. /* If we are entering from the stopping state let the SCI User know that
  909. * the stop operation has completed.
  910. */
  911. prev_state = idev->sm.previous_state_id;
  912. if (prev_state == SCI_DEV_STOPPING)
  913. isci_remote_device_deconstruct(ihost, idev);
  914. sci_controller_remote_device_stopped(ihost, idev);
  915. }
  916. static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
  917. {
  918. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  919. struct isci_host *ihost = idev->owning_port->owning_controller;
  920. isci_remote_device_not_ready(ihost, idev,
  921. SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
  922. }
  923. static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
  924. {
  925. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  926. struct isci_host *ihost = idev->owning_port->owning_controller;
  927. struct domain_device *dev = idev->domain_dev;
  928. if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
  929. sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
  930. } else if (dev_is_expander(dev)) {
  931. sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
  932. } else
  933. isci_remote_device_ready(ihost, idev);
  934. }
  935. static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
  936. {
  937. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  938. struct domain_device *dev = idev->domain_dev;
  939. if (dev->dev_type == SAS_END_DEV) {
  940. struct isci_host *ihost = idev->owning_port->owning_controller;
  941. isci_remote_device_not_ready(ihost, idev,
  942. SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
  943. }
  944. }
  945. static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
  946. {
  947. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  948. struct isci_host *ihost = idev->owning_port->owning_controller;
  949. dev_dbg(&ihost->pdev->dev,
  950. "%s: isci_device = %p\n", __func__, idev);
  951. sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
  952. }
  953. static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
  954. {
  955. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  956. struct isci_host *ihost = idev->owning_port->owning_controller;
  957. dev_dbg(&ihost->pdev->dev,
  958. "%s: isci_device = %p\n", __func__, idev);
  959. sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
  960. }
  961. static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
  962. {
  963. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  964. idev->working_request = NULL;
  965. if (sci_remote_node_context_is_ready(&idev->rnc)) {
  966. /*
  967. * Since the RNC is ready, it's alright to finish completion
  968. * processing (e.g. signal the remote device is ready). */
  969. sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
  970. } else {
  971. sci_remote_node_context_resume(&idev->rnc,
  972. sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
  973. idev);
  974. }
  975. }
  976. static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
  977. {
  978. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  979. struct isci_host *ihost = idev->owning_port->owning_controller;
  980. BUG_ON(idev->working_request == NULL);
  981. isci_remote_device_not_ready(ihost, idev,
  982. SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
  983. }
  984. static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
  985. {
  986. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  987. struct isci_host *ihost = idev->owning_port->owning_controller;
  988. if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
  989. isci_remote_device_not_ready(ihost, idev,
  990. idev->not_ready_reason);
  991. }
  992. static void sci_stp_remote_device_atapi_error_substate_enter(
  993. struct sci_base_state_machine *sm)
  994. {
  995. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  996. /* This state is entered when an I/O is decoded with an error
  997. * condition. By this point the RNC expected suspension state is set.
  998. * The error conditions suspend the device, so unsuspend here if
  999. * possible.
  1000. */
  1001. sci_remote_node_context_resume(&idev->rnc,
  1002. atapi_remote_device_resume_done,
  1003. idev);
  1004. }
  1005. static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
  1006. {
  1007. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  1008. struct isci_host *ihost = idev->owning_port->owning_controller;
  1009. isci_remote_device_ready(ihost, idev);
  1010. }
  1011. static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
  1012. {
  1013. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  1014. struct isci_host *ihost = idev->owning_port->owning_controller;
  1015. BUG_ON(idev->working_request == NULL);
  1016. isci_remote_device_not_ready(ihost, idev,
  1017. SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
  1018. }
  1019. static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
  1020. {
  1021. struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
  1022. idev->working_request = NULL;
  1023. }
  1024. static const struct sci_base_state sci_remote_device_state_table[] = {
  1025. [SCI_DEV_INITIAL] = {
  1026. .enter_state = sci_remote_device_initial_state_enter,
  1027. },
  1028. [SCI_DEV_STOPPED] = {
  1029. .enter_state = sci_remote_device_stopped_state_enter,
  1030. },
  1031. [SCI_DEV_STARTING] = {
  1032. .enter_state = sci_remote_device_starting_state_enter,
  1033. },
  1034. [SCI_DEV_READY] = {
  1035. .enter_state = sci_remote_device_ready_state_enter,
  1036. .exit_state = sci_remote_device_ready_state_exit
  1037. },
  1038. [SCI_STP_DEV_IDLE] = {
  1039. .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
  1040. },
  1041. [SCI_STP_DEV_CMD] = {
  1042. .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
  1043. },
  1044. [SCI_STP_DEV_NCQ] = { },
  1045. [SCI_STP_DEV_NCQ_ERROR] = {
  1046. .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
  1047. },
  1048. [SCI_STP_DEV_ATAPI_ERROR] = {
  1049. .enter_state = sci_stp_remote_device_atapi_error_substate_enter,
  1050. },
  1051. [SCI_STP_DEV_AWAIT_RESET] = { },
  1052. [SCI_SMP_DEV_IDLE] = {
  1053. .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
  1054. },
  1055. [SCI_SMP_DEV_CMD] = {
  1056. .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
  1057. .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
  1058. },
  1059. [SCI_DEV_STOPPING] = { },
  1060. [SCI_DEV_FAILED] = { },
  1061. [SCI_DEV_RESETTING] = {
  1062. .enter_state = sci_remote_device_resetting_state_enter,
  1063. .exit_state = sci_remote_device_resetting_state_exit
  1064. },
  1065. [SCI_DEV_FINAL] = { },
  1066. };
  1067. /**
  1068. * sci_remote_device_construct() - common construction
  1069. * @sci_port: SAS/SATA port through which this device is accessed.
  1070. * @sci_dev: remote device to construct
  1071. *
  1072. * This routine just performs benign initialization and does not
  1073. * allocate the remote_node_context which is left to
  1074. * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
  1075. * frees the remote_node_context(s) for the device.
  1076. */
  1077. static void sci_remote_device_construct(struct isci_port *iport,
  1078. struct isci_remote_device *idev)
  1079. {
  1080. idev->owning_port = iport;
  1081. idev->started_request_count = 0;
  1082. sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
  1083. sci_remote_node_context_construct(&idev->rnc,
  1084. SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
  1085. }
  1086. /**
  1087. * sci_remote_device_da_construct() - construct direct attached device.
  1088. *
  1089. * The information (e.g. IAF, Signature FIS, etc.) necessary to build
  1090. * the device is known to the SCI Core since it is contained in the
  1091. * sci_phy object. Remote node context(s) is/are a global resource
  1092. * allocated by this routine, freed by sci_remote_device_destruct().
  1093. *
  1094. * Returns:
  1095. * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
  1096. * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
  1097. * sata-only controller instance.
  1098. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
  1099. */
  1100. static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
  1101. struct isci_remote_device *idev)
  1102. {
  1103. enum sci_status status;
  1104. struct sci_port_properties properties;
  1105. sci_remote_device_construct(iport, idev);
  1106. sci_port_get_properties(iport, &properties);
  1107. /* Get accurate port width from port's phy mask for a DA device. */
  1108. idev->device_port_width = hweight32(properties.phy_mask);
  1109. status = sci_controller_allocate_remote_node_context(iport->owning_controller,
  1110. idev,
  1111. &idev->rnc.remote_node_index);
  1112. if (status != SCI_SUCCESS)
  1113. return status;
  1114. idev->connection_rate = sci_port_get_max_allowed_speed(iport);
  1115. return SCI_SUCCESS;
  1116. }
  1117. /**
  1118. * sci_remote_device_ea_construct() - construct expander attached device
  1119. *
  1120. * Remote node context(s) is/are a global resource allocated by this
  1121. * routine, freed by sci_remote_device_destruct().
  1122. *
  1123. * Returns:
  1124. * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
  1125. * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
  1126. * sata-only controller instance.
  1127. * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
  1128. */
  1129. static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
  1130. struct isci_remote_device *idev)
  1131. {
  1132. struct domain_device *dev = idev->domain_dev;
  1133. enum sci_status status;
  1134. sci_remote_device_construct(iport, idev);
  1135. status = sci_controller_allocate_remote_node_context(iport->owning_controller,
  1136. idev,
  1137. &idev->rnc.remote_node_index);
  1138. if (status != SCI_SUCCESS)
  1139. return status;
  1140. /* For SAS-2 the physical link rate is actually a logical link
  1141. * rate that incorporates multiplexing. The SCU doesn't
  1142. * incorporate multiplexing and for the purposes of the
  1143. * connection the logical link rate is that same as the
  1144. * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
  1145. * one another, so this code works for both situations.
  1146. */
  1147. idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
  1148. dev->linkrate);
  1149. /* / @todo Should I assign the port width by reading all of the phys on the port? */
  1150. idev->device_port_width = 1;
  1151. return SCI_SUCCESS;
  1152. }
  1153. enum sci_status sci_remote_device_resume(
  1154. struct isci_remote_device *idev,
  1155. scics_sds_remote_node_context_callback cb_fn,
  1156. void *cb_p)
  1157. {
  1158. enum sci_status status;
  1159. status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
  1160. if (status != SCI_SUCCESS)
  1161. dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
  1162. __func__, status);
  1163. return status;
  1164. }
  1165. static void isci_remote_device_resume_from_abort_complete(void *cbparam)
  1166. {
  1167. struct isci_remote_device *idev = cbparam;
  1168. struct isci_host *ihost = idev->owning_port->owning_controller;
  1169. scics_sds_remote_node_context_callback abort_resume_cb =
  1170. idev->abort_resume_cb;
  1171. dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
  1172. __func__, abort_resume_cb);
  1173. if (abort_resume_cb != NULL) {
  1174. idev->abort_resume_cb = NULL;
  1175. abort_resume_cb(idev->abort_resume_cbparam);
  1176. }
  1177. clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
  1178. wake_up(&ihost->eventq);
  1179. }
  1180. void isci_remote_device_wait_for_resume_from_abort(
  1181. struct isci_host *ihost,
  1182. struct isci_remote_device *idev)
  1183. {
  1184. dev_dbg(scirdev_to_dev(idev), "%s: starting resume wait: %p\n",
  1185. __func__, idev);
  1186. #define MAX_RESUME_MSECS 10000
  1187. if (!wait_event_timeout(ihost->eventq,
  1188. (!test_bit(IDEV_ABORT_PATH_RESUME_PENDING,
  1189. &idev->flags)
  1190. || test_bit(IDEV_STOP_PENDING, &idev->flags)),
  1191. msecs_to_jiffies(MAX_RESUME_MSECS))) {
  1192. dev_warn(scirdev_to_dev(idev), "%s: #### Timeout waiting for "
  1193. "resume: %p\n", __func__, idev);
  1194. }
  1195. clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
  1196. dev_dbg(scirdev_to_dev(idev), "%s: resume wait done: %p\n",
  1197. __func__, idev);
  1198. }
  1199. enum sci_status isci_remote_device_resume_from_abort(
  1200. struct isci_host *ihost,
  1201. struct isci_remote_device *idev)
  1202. {
  1203. unsigned long flags;
  1204. enum sci_status status;
  1205. spin_lock_irqsave(&ihost->scic_lock, flags);
  1206. /* Preserve any current resume callbacks, for instance from other
  1207. * resumptions.
  1208. */
  1209. idev->abort_resume_cb = idev->rnc.user_callback;
  1210. idev->abort_resume_cbparam = idev->rnc.user_cookie;
  1211. set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
  1212. clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
  1213. status = sci_remote_device_resume(
  1214. idev, isci_remote_device_resume_from_abort_complete,
  1215. idev);
  1216. spin_unlock_irqrestore(&ihost->scic_lock, flags);
  1217. isci_remote_device_wait_for_resume_from_abort(ihost, idev);
  1218. return status;
  1219. }
  1220. /**
  1221. * sci_remote_device_start() - This method will start the supplied remote
  1222. * device. This method enables normal IO requests to flow through to the
  1223. * remote device.
  1224. * @remote_device: This parameter specifies the device to be started.
  1225. * @timeout: This parameter specifies the number of milliseconds in which the
  1226. * start operation should complete.
  1227. *
  1228. * An indication of whether the device was successfully started. SCI_SUCCESS
  1229. * This value is returned if the device was successfully started.
  1230. * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
  1231. * the device when there have been no phys added to it.
  1232. */
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
                                               u32 timeout)
{
        struct sci_base_state_machine *sm = &idev->sm;
        enum sci_remote_device_states state = sm->current_state_id;
        enum sci_status status;

        if (state != SCI_DEV_STOPPED) {
                dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
                         __func__, dev_state_name(state));
                return SCI_FAILURE_INVALID_STATE;
        }

        status = sci_remote_device_resume(idev, remote_device_resume_done,
                                          idev);
        if (status != SCI_SUCCESS)
                return status;

        sci_change_state(sm, SCI_DEV_STARTING);

        return SCI_SUCCESS;
}

static enum sci_status isci_remote_device_construct(struct isci_port *iport,
                                                    struct isci_remote_device *idev)
{
        struct isci_host *ihost = iport->isci_host;
        struct domain_device *dev = idev->domain_dev;
        enum sci_status status;

        if (dev->parent && dev_is_expander(dev->parent))
                status = sci_remote_device_ea_construct(iport, idev);
        else
                status = sci_remote_device_da_construct(iport, idev);

        if (status != SCI_SUCCESS) {
                dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
                        __func__, status);
                return status;
        }

        /* start the device. */
        status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
        if (status != SCI_SUCCESS)
                dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
                         status);

        return status;
}

/**
 * isci_remote_device_alloc() - This function allocates the isci_remote_device
 *    object when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * Return: pointer to the new isci_remote_device, or NULL if no device slot
 *    is available.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
        struct isci_remote_device *idev;
        int i;

        for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
                idev = &ihost->devices[i];
                if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
                        break;
        }

        if (i >= SCI_MAX_REMOTE_DEVICES) {
                dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
                return NULL;
        }

        if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
                return NULL;

        return idev;
}

void isci_remote_device_release(struct kref *kref)
{
        struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
        struct isci_host *ihost = idev->isci_port->isci_host;

        idev->domain_dev = NULL;
        idev->isci_port = NULL;
        clear_bit(IDEV_START_PENDING, &idev->flags);
        clear_bit(IDEV_STOP_PENDING, &idev->flags);
        clear_bit(IDEV_IO_READY, &idev->flags);
        clear_bit(IDEV_GONE, &idev->flags);
        smp_mb__before_clear_bit();
        clear_bit(IDEV_ALLOCATED, &idev->flags);
        wake_up(&ihost->eventq);
}
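
/*
 * Illustrative sketch (not part of the original driver): callers drop their
 * reference with a kref_put() that names isci_remote_device_release(), which
 * returns the slot to isci_remote_device_alloc() by clearing IDEV_ALLOCATED
 * above.  The driver wraps that put in the isci_put_device() helper used by
 * isci_remote_device_found() below; the body here is only an assumed
 * equivalent of that wrapper, under a hypothetical name.
 */
static inline void example_put_remote_device(struct isci_remote_device *idev)
{
        /* Drop one reference; the release callback runs on the final put. */
        kref_put(&idev->kref, isci_remote_device_release);
}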

/**
 * isci_remote_device_stop() - This function is called internally to stop the
 *    remote device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 *
 * Return: the status of the request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
        enum sci_status status;
        unsigned long flags;

        dev_dbg(&ihost->pdev->dev,
                "%s: isci_device = %p\n", __func__, idev);

        spin_lock_irqsave(&ihost->scic_lock, flags);
        idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
        set_bit(IDEV_GONE, &idev->flags);
        set_bit(IDEV_STOP_PENDING, &idev->flags);
        status = sci_remote_device_stop(idev, 50);
        spin_unlock_irqrestore(&ihost->scic_lock, flags);

        /* Wait for the stop complete callback. */
        if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
                /* nothing to wait for */;
        else
                wait_for_device_stop(ihost, idev);

        dev_dbg(&ihost->pdev->dev,
                "%s: isci_device = %p, waiting done.\n", __func__, idev);

        return status;
}

/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 *    device is removed.
 * @dev: This parameter specifies the libsas domain device.
 */
void isci_remote_device_gone(struct domain_device *dev)
{
        struct isci_host *ihost = dev_to_ihost(dev);
        struct isci_remote_device *idev = dev->lldd_dev;

        dev_dbg(&ihost->pdev->dev,
                "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
                __func__, dev, idev, idev->isci_port);

        isci_remote_device_stop(ihost, idev);
}

/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 *    device is discovered.  A remote device object is created and started.
 *    The function then sleeps until the sci core device started message is
 *    received.
 * @dev: This parameter specifies the libsas domain device.
 *
 * Return: status; zero indicates success.
 */
int isci_remote_device_found(struct domain_device *dev)
{
        struct isci_host *isci_host = dev_to_ihost(dev);
        struct isci_port *isci_port = dev->port->lldd_port;
        struct isci_remote_device *isci_device;
        enum sci_status status;

        dev_dbg(&isci_host->pdev->dev,
                "%s: domain_device = %p\n", __func__, dev);

        if (!isci_port)
                return -ENODEV;

        isci_device = isci_remote_device_alloc(isci_host, isci_port);
        if (!isci_device)
                return -ENODEV;

        kref_init(&isci_device->kref);
        INIT_LIST_HEAD(&isci_device->node);

        spin_lock_irq(&isci_host->scic_lock);
        isci_device->domain_dev = dev;
        isci_device->isci_port = isci_port;
        list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

        set_bit(IDEV_START_PENDING, &isci_device->flags);
        status = isci_remote_device_construct(isci_port, isci_device);

        dev_dbg(&isci_host->pdev->dev,
                "%s: isci_device = %p\n",
                __func__, isci_device);

        if (status == SCI_SUCCESS) {
                /* device came up, advertise it to the world */
                dev->lldd_dev = isci_device;
        } else
                isci_put_device(isci_device);
        spin_unlock_irq(&isci_host->scic_lock);

        /* wait for the device ready callback. */
        wait_for_device_start(isci_host, isci_device);

        return status == SCI_SUCCESS ? 0 : -ENODEV;
}

enum sci_status isci_remote_device_suspend_terminate(
        struct isci_host *ihost,
        struct isci_remote_device *idev,
        struct isci_request *ireq)
{
        unsigned long flags;
        enum sci_status status;

        /* Put the device into suspension. */
        spin_lock_irqsave(&ihost->scic_lock, flags);
        set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
        sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
        spin_unlock_irqrestore(&ihost->scic_lock, flags);

        /* Terminate and wait for the completions. */
        status = isci_remote_device_terminate_requests(ihost, idev, ireq);
        if (status != SCI_SUCCESS)
                dev_dbg(&ihost->pdev->dev,
                        "%s: isci_remote_device_terminate_requests(%p) "
                        "returned %d!\n",
                        __func__, idev, status);

        /* NOTE: RNC resumption is left to the caller! */
        return status;
}
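
/*
 * Illustrative sketch (not part of the original driver): because RNC
 * resumption is left to the caller, an abort path is expected to pair the
 * suspend/terminate step above with isci_remote_device_resume_from_abort()
 * once the offending requests are gone.  This helper is only a hypothetical
 * example of that pairing; the driver's real callers live in its task
 * management code and add further checks.
 */
static enum sci_status example_abort_device_requests(struct isci_host *ihost,
                                                     struct isci_remote_device *idev,
                                                     struct isci_request *ireq)
{
        enum sci_status status;

        /* Suspend the RNC, terminate the request(s), and wait for completion. */
        status = isci_remote_device_suspend_terminate(ihost, idev, ireq);

        /* Resume the RNC so normal I/O can flow again (this sketch resumes
         * unconditionally).
         */
        isci_remote_device_resume_from_abort(ihost, idev);

        return status;
}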

int isci_remote_device_is_safe_to_abort(
        struct isci_remote_device *idev)
{
        return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
}

enum sci_status sci_remote_device_abort_requests_pending_abort(
        struct isci_remote_device *idev)
{
        return sci_remote_device_terminate_reqs_checkabort(idev, 1);
}

enum sci_status isci_remote_device_reset_complete(
        struct isci_host *ihost,
        struct isci_remote_device *idev)
{
        unsigned long flags;
        enum sci_status status;

        spin_lock_irqsave(&ihost->scic_lock, flags);
        status = sci_remote_device_reset_complete(idev);
        spin_unlock_irqrestore(&ihost->scic_lock, flags);

        return status;
}

void isci_dev_set_hang_detection_timeout(
        struct isci_remote_device *idev,
        u32 timeout)
{
        if (dev_is_sata(idev->domain_dev)) {
                if (timeout) {
                        if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
                                             &idev->flags))
                                return; /* Already enabled. */
                } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
                                               &idev->flags))
                        return; /* Not enabled. */

                sci_port_set_hang_detection_timeout(idev->owning_port,
                                                    timeout);
        }
}
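
/*
 * Illustrative sketch (not part of the original driver): the function above
 * treats a zero timeout as "disable" and a non-zero value as "enable", and it
 * only reaches the port for SATA devices.  The wrapper below is a
 * hypothetical example of bracketing an error-handling window with link-layer
 * hang detection; the timeout value is an assumption, not a driver constant.
 */
static void example_run_with_hang_detection(struct isci_remote_device *idev)
{
        /* Arm link-layer hang detection for the window (value assumed). */
        isci_dev_set_hang_detection_timeout(idev, 500);

        /* ... error-handling work against the device would go here ... */

        /* A zero timeout clears IDEV_RNC_LLHANG_ENABLED and disarms it. */
        isci_dev_set_hang_detection_timeout(idev, 0);
}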