remote_node_context.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"

#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
	static const char * const strings[] = RNC_STATES;

	return strings[state];
}
#undef C

/**
 * sci_remote_node_context_is_ready() - check whether an RNC is ready
 * @sci_rnc: The remote node context object to check.
 *
 * Return: true if the remote node context is in the READY state, false
 * otherwise.
 */
bool sci_remote_node_context_is_ready(
	struct sci_remote_node_context *sci_rnc)
{
	u32 current_state = sci_rnc->sm.current_state_id;

	if (current_state == SCI_RNC_READY) {
		return true;
	}

	return false;
}

bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
{
	u32 current_state = sci_rnc->sm.current_state_id;

	if (current_state == SCI_RNC_TX_RX_SUSPENDED)
		return true;
	return false;
}

static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
	if (id < ihost->remote_node_entries &&
	    ihost->device_table[id])
		return &ihost->remote_node_context_table[id];

	return NULL;
}
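
/*
 * Fill in the RNC buffer for this device: remote node index, port width,
 * logical port, SAS address (converted to the little-endian context RAM
 * format), and occupancy/inactivity timeouts chosen for STP vs. SSP
 * targets.  The buffer is written with is_valid = false; it is validated
 * when the context is later posted.
 */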
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	rnc = sci_rnc_by_id(ihost, rni);

	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = false;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;
	rnc->ssp.arbitration_wait_time = 0;

	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}

static void sci_remote_node_context_save_cbparams(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter)
{
	sci_rnc->user_callback = callback;
	sci_rnc->user_cookie   = callback_parameter;
}

/**
 * sci_remote_node_context_setup_to_resume() - arm the RNC to reach READY
 * @sci_rnc: the remote node context being set up
 * @callback: the callback to invoke when the ready state is reached
 * @callback_parameter: cookie passed to @callback
 * @dest_param: the destination state to record
 *
 * Set up the remote node context object so that it will transition to its
 * ready state.  If the remote node context is already set up to transition
 * to its final state then this function does nothing.
 */
static void sci_remote_node_context_setup_to_resume(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter,
	enum sci_remote_node_context_destination_state dest_param)
{
	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
		sci_rnc->destination_state = dest_param;
		if (callback != NULL)
			sci_remote_node_context_save_cbparams(
				sci_rnc, callback, callback_parameter);
	}
}

static void sci_remote_node_context_setup_to_destroy(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter)
{
	sci_rnc->destination_state = RNC_DEST_FINAL;
	sci_rnc->user_callback     = callback;
	sci_rnc->user_cookie       = callback_parameter;
}

/**
 * sci_remote_node_context_notify_user() - invoke and clear the user callback
 * @rnc: the remote node context with a pending user callback
 *
 * This method just calls the user callback function and then resets the
 * callback.
 */
static void sci_remote_node_context_notify_user(
	struct sci_remote_node_context *rnc)
{
	if (rnc->user_callback != NULL) {
		(*rnc->user_callback)(rnc->user_cookie);

		rnc->user_callback = NULL;
		rnc->user_cookie = NULL;
	}
}
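
/*
 * If a resume to the ready state is still pending (destination READY or
 * SUSPENDED_RESUME), restart the resume using the previously saved
 * callback; otherwise clear the destination state.
 */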
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
	if ((rnc->destination_state == RNC_DEST_READY) ||
	    (rnc->destination_state == RNC_DEST_SUSPENDED_RESUME)) {
		rnc->destination_state = RNC_DEST_READY;
		sci_remote_node_context_resume(rnc, rnc->user_callback,
					       rnc->user_cookie);
	} else
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
}
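
/*
 * Mark the context buffer valid and post it to the hardware.  Expander
 * attached SATA devices are posted with SCU_CONTEXT_COMMAND_POST_RNC_96
 * (presumably the larger STP context form); all other devices use the
 * POST_RNC_32 form, and direct attached devices also have their port
 * transports configured here.
 */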
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = true;

	if (dev_is_sata(dev) && dev->parent) {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

		if (!dev->parent)
			sci_port_setup_transports(idev->owning_port,
						  sci_rnc->remote_node_index);
	}
}
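
/*
 * Clear the valid bit in the context buffer and post an invalidate request
 * so the hardware drops the remote node context.
 */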
static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = false;

	sci_remote_device_post_request(rnc_to_dev(sci_rnc),
				       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}

static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Check to see if we have gotten back to the initial state because
	 * someone requested to destroy the remote node context object.
	 */
	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		sci_remote_node_context_notify_user(rnc);
	}
}

static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);

	sci_remote_node_context_validate_context_buffer(sci_rnc);
}

static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Terminate all outstanding requests. */
	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
	sci_remote_node_context_invalidate_context_buffer(rnc);
}

static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev;
	struct domain_device *dev;

	idev = rnc_to_dev(rnc);
	dev = idev->domain_dev;

	/*
	 * For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy and in cases where we
	 * resume because of a target reset we also need to update
	 * the STPTLDARNI register with the RNi of the device
	 */
	if (dev_is_sata(dev) && !dev->parent)
		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);

	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}

static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	enum sci_remote_node_context_destination_state dest_select;
	int tell_user = 1;

	dest_select = rnc->destination_state;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	if ((dest_select == RNC_DEST_SUSPENDED) ||
	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
		sci_remote_node_context_suspend(
			rnc, rnc->suspend_reason,
			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);

		if (dest_select == RNC_DEST_SUSPENDED_RESUME)
			tell_user = 0;  /* Wait until ready again. */
	}
	if (tell_user)
		sci_remote_node_context_notify_user(rnc);
}

static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 new_count = rnc->suspend_count + 1;

	if (new_count == 0)
		rnc->suspend_count = 1;
	else
		rnc->suspend_count = new_count;
	smp_wmb();

	/* Terminate outstanding requests pending abort. */
	sci_remote_device_abort_requests_pending_abort(idev);

	wake_up(&ihost->eventq);
	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_await_suspend_state_exit(
	struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc
		= container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);

	if (dev_is_sata(idev->domain_dev))
		isci_dev_set_hang_detection_timeout(idev, 0);
}

static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};
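
/*
 * Rough lifecycle of a remote node context, as driven by the handlers above
 * (illustrative summary only):
 *
 *   sci_remote_node_context_construct()  -> SCI_RNC_INITIAL
 *   sci_remote_node_context_resume()     -> SCI_RNC_POSTING -> SCI_RNC_READY
 *   hardware suspension event            -> SCI_RNC_TX_SUSPENDED or
 *                                           SCI_RNC_TX_RX_SUSPENDED
 *   sci_remote_node_context_resume()     -> SCI_RNC_RESUMING (or
 *                                           SCI_RNC_INVALIDATING for
 *                                           expander attached SATA devices)
 *   sci_remote_node_context_destruct()   -> SCI_RNC_INVALIDATING -> SCI_RNC_INITIAL
 */

/**
 * sci_remote_node_context_construct() - initialize a remote node context
 * @rnc: the context to initialize
 * @remote_node_index: the hardware remote node index to associate with @rnc
 *
 * Zero the context, record the remote node index, and start the state
 * machine in the SCI_RNC_INITIAL state.
 */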
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
				       u16 remote_node_index)
{
	memset(rnc, 0, sizeof(struct sci_remote_node_context));

	rnc->remote_node_index = remote_node_index;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}
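
/*
 * Handle SCU event notifications for the remote node context state machine:
 * post, invalidate and release completions advance the state, while
 * transport-layer suspension events move a ready (or about-to-suspend)
 * context into one of the suspended states.  Unexpected events are logged
 * and rejected.
 */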
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
						       u32 event_code)
{
	enum scis_sds_remote_node_context_states state;
	u32 next_state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_POSTING:
		switch (scu_get_event_code(event_code)) {
		case SCU_EVENT_POST_RNC_COMPLETE:
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_INVALIDATING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
			if (sci_rnc->destination_state == RNC_DEST_FINAL)
				next_state = SCI_RNC_INITIAL;
			else
				next_state = SCI_RNC_POSTING;
			sci_change_state(&sci_rnc->sm, next_state);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being invalidated anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					 "%s: SCIC Remote Node Context 0x%p was "
					 "suspended by hardware while being "
					 "invalidated.\n", __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_RESUMING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being resumed anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					 "%s: SCIC Remote Node Context 0x%p was "
					 "suspended by hardware while being resumed.\n",
					 __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_READY:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			next_state = SCI_RNC_TX_SUSPENDED;
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			next_state = SCI_RNC_TX_RX_SUSPENDED;
			break;
		default:
			goto out;
		}
		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
			sci_change_state(&sci_rnc->sm, next_state);
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	return SCI_SUCCESS;

 out:
	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
		 "%s: code: %#x state: %s\n", __func__, event_code,
		 rnc_state_name(state));
	return SCI_FAILURE;
}
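
/*
 * Begin tearing down the remote node context: record the final destination
 * state and the caller's callback, then (unless already invalidating)
 * transition to SCI_RNC_INVALIDATING so the hardware context is invalidated
 * before the state machine returns to SCI_RNC_INITIAL.
 */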
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
						 scics_sds_remote_node_context_callback cb_fn,
						 void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		return SCI_SUCCESS;
	case SCI_RNC_INITIAL:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		/* We have decided that the destruct request on the remote node
		 * context can not fail since it is either in the initial/destroyed
		 * state or can be destroyed.
		 */
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
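
/*
 * Request suspension of the remote node context.  Depending on the current
 * state this either records the request to be honored on entry to the ready
 * state (while posting, resuming or invalidating), acknowledges a context
 * that is already suspended, or posts the software suspend command and
 * waits in SCI_RNC_AWAIT_SUSPENSION for the hardware to confirm.
 */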
enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %s, current suspend_type %x dest state %d,"
			" arg suspend_reason %d, arg suspend_type %x",
		__func__, rnc_state_name(state), sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason == SCI_HW_SUSPEND) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_RESUMING:
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to AWAIT - this signals the
		 * entry into the SCI_RNC_READY state that a suspension
		 * needs to be done immediately.
		 */
		sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		sci_rnc->suspend_type = suspend_type;
		sci_rnc->suspend_reason = suspend_reason;
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->suspend_type = suspend_type;
	sci_rnc->suspend_reason = suspend_reason;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
			isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}
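
/*
 * Request resumption of the remote node context.  If the abort path is
 * active only the callback parameters are saved; otherwise the context is
 * constructed and posted (from INITIAL), re-armed for resume (while
 * posting/invalidating/resuming), or resumed directly from a suspended
 * state.  Expander attached SATA devices are invalidated and reposted
 * instead, which is the only way to clear the hardware TCi to NCQ tag
 * mapping.
 */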
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
					       scics_sds_remote_node_context_callback cb_fn,
					       void *cb_p)
{
	enum scis_sds_remote_node_context_states state;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);

	state = sci_rnc->sm.current_state_id;
	dev_dbg(scirdev_to_dev(idev),
		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
			"dev resume path %s\n",
		__func__, rnc_state_name(state), cb_fn, cb_p,
		sci_rnc->destination_state,
		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
			? "<abort active>" : "<normal>");

	switch (state) {
	case SCI_RNC_INITIAL:
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		if (test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags))
			sci_remote_node_context_save_cbparams(sci_rnc, cb_fn,
							      cb_p);
		else {
			sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn,
								cb_p, RNC_DEST_READY);
			sci_remote_node_context_construct_buffer(sci_rnc);
			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		}
		return SCI_SUCCESS;

	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		if (test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags))
			sci_remote_node_context_save_cbparams(sci_rnc, cb_fn,
							      cb_p);
		else {
			/* We are still waiting to post when a resume was
			 * requested.
			 */
			switch (sci_rnc->destination_state) {
			case RNC_DEST_SUSPENDED:
			case RNC_DEST_SUSPENDED_RESUME:
				/* Previously waiting to suspend after posting.
				 * Now continue onto resumption.
				 */
				sci_remote_node_context_setup_to_resume(
					sci_rnc, cb_fn, cb_p,
					RNC_DEST_SUSPENDED_RESUME);
				break;
			default:
				sci_remote_node_context_setup_to_resume(
					sci_rnc, cb_fn, cb_p,
					RNC_DEST_READY);
				break;
			}
		}
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		if (test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags))
			sci_remote_node_context_save_cbparams(sci_rnc, cb_fn,
							      cb_p);
		else {
			struct domain_device *dev = idev->domain_dev;

			/* If this is an expander attached SATA device we must
			 * invalidate and repost the RNC since this is the only
			 * way to clear the TCi to NCQ tag mapping table for
			 * the RNi. All other device types we can just resume.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p, RNC_DEST_READY);

			if (dev_is_sata(dev) && dev->parent)
				sci_change_state(&sci_rnc->sm,
						 SCI_RNC_INVALIDATING);
			else
				sci_change_state(&sci_rnc->sm,
						 SCI_RNC_RESUMING);
		}
		return SCI_SUCCESS;

	case SCI_RNC_AWAIT_SUSPENSION:
		if (test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags))
			sci_remote_node_context_save_cbparams(sci_rnc, cb_fn,
							      cb_p);
		else
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;

	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
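
/*
 * New I/O may only be started while the remote node context is ready; a
 * suspended context requires a remote device reset before further I/O.
 */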
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
						 struct isci_request *ireq)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_READY:
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	default:
		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: invalid state %s\n", __func__,
			rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
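
/*
 * Task management requests are started by resuming the remote node context;
 * any resume failure is logged and returned to the caller.
 */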
enum sci_status sci_remote_node_context_start_task(
	struct sci_remote_node_context *sci_rnc,
	struct isci_request *ireq,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
								 cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: resume failed: %d\n", __func__, status);
	return status;
}
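
/*
 * It is only considered safe to abort outstanding requests while the
 * context is being invalidated or is fully (tx/rx) suspended.
 */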
int sci_remote_node_context_is_safe_to_abort(
	struct sci_remote_node_context *sci_rnc)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_TX_RX_SUSPENDED:
		return 1;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
	case SCI_RNC_INITIAL:
		return 0;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %d\n", __func__, state);
		return 0;
	}
}