remote_node_context.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"

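/*
 * RNC_STATES (declared in remote_node_context.h) is an X-macro list of
 * C(...) entries; redefining C() to stringify each entry lets
 * rnc_state_name() map a state id to its printable name.
 */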
#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
	static const char * const strings[] = RNC_STATES;

	return strings[state];
}
#undef C

/**
 * sci_remote_node_context_is_ready() - check if the RNC is in the ready state
 * @sci_rnc: The remote node context object to check.
 *
 * Return: true if the remote node context is in the ready state, false
 * otherwise.
 */
bool sci_remote_node_context_is_ready(
	struct sci_remote_node_context *sci_rnc)
{
	u32 current_state = sci_rnc->sm.current_state_id;

	if (current_state == SCI_RNC_READY) {
		return true;
	}

	return false;
}

static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
	if (id < ihost->remote_node_entries &&
	    ihost->device_table[id])
		return &ihost->remote_node_context_table[id];

	return NULL;
}

static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	rnc = sci_rnc_by_id(ihost, rni);

	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = false;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;

	rnc->ssp.arbitration_wait_time = 0;

	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}

/**
 * sci_remote_node_context_setup_to_resume() - prepare the RNC to become ready
 * @sci_rnc: the remote node context to set up
 * @callback: the user callback to invoke once the ready state is reached
 * @callback_parameter: cookie passed back to @callback
 * @dest_param: the destination state to record for the transition
 *
 * This method sets up the remote node context object so that it will
 * transition to its ready state. If the remote node context is already set
 * up to transition to its final (destroyed) state, this function does
 * nothing.
 */
static void sci_remote_node_context_setup_to_resume(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter,
	enum sci_remote_node_context_destination_state dest_param)
{
	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
		sci_rnc->destination_state = dest_param;
		if (callback != NULL) {
			sci_rnc->user_callback = callback;
			sci_rnc->user_cookie = callback_parameter;
		}
	}
}

static void sci_remote_node_context_setup_to_destroy(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter)
{
	sci_rnc->destination_state = RNC_DEST_FINAL;
	sci_rnc->user_callback = callback;
	sci_rnc->user_cookie = callback_parameter;
}

/**
 * sci_remote_node_context_notify_user() - invoke and clear the user callback
 * @rnc: the remote node context whose user callback should be invoked
 *
 * This method just calls the user callback function and then resets the
 * callback.
 */
static void sci_remote_node_context_notify_user(
	struct sci_remote_node_context *rnc)
{
	if (rnc->user_callback != NULL) {
		(*rnc->user_callback)(rnc->user_cookie);

		rnc->user_callback = NULL;
		rnc->user_cookie = NULL;
	}
}

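/*
 * Called once a suspension has taken effect: if a resume was requested while
 * the suspension was still in flight, start the resume now; otherwise clear
 * the recorded destination state.
 */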
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
	if ((rnc->destination_state == RNC_DEST_READY) ||
	    (rnc->destination_state == RNC_DEST_SUSPENDED_RESUME)) {
		rnc->destination_state = RNC_DEST_READY;
		sci_remote_node_context_resume(rnc, rnc->user_callback,
					       rnc->user_cookie);
	} else
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
}

static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = true;

	if (dev_is_sata(dev) && dev->parent) {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

		if (!dev->parent)
			sci_port_setup_transports(idev->owning_port,
						  sci_rnc->remote_node_index);
	}
}

static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = false;

	sci_remote_device_post_request(rnc_to_dev(sci_rnc),
				       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}

static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Check to see if we have gotten back to the initial state because
	 * someone requested to destroy the remote node context object.
	 */
	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		sci_remote_node_context_notify_user(rnc);
	}
}

static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);

	sci_remote_node_context_validate_context_buffer(sci_rnc);
}

static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Terminate all outstanding requests. */
	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
	sci_remote_node_context_invalidate_context_buffer(rnc);
}

static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev;
	struct domain_device *dev;

	idev = rnc_to_dev(rnc);
	dev = idev->domain_dev;

	/*
	 * For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy and in cases where we
	 * resume because of a target reset we also need to update
	 * the STPTLDARNI register with the RNi of the device
	 */
	if (dev_is_sata(dev) && !dev->parent)
		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);

	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}

static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	enum sci_remote_node_context_destination_state dest_select;
	scics_sds_remote_node_context_callback usr_cb = rnc->user_callback;
	void *usr_param = rnc->user_cookie;
	int tell_user = 1;

	dest_select = rnc->destination_state;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	if ((dest_select == RNC_DEST_SUSPENDED) ||
	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
		sci_remote_node_context_suspend(
			rnc, rnc->suspend_reason,
			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);

		if (dest_select == RNC_DEST_SUSPENDED_RESUME) {
			sci_remote_node_context_resume(rnc, usr_cb, usr_param);
			tell_user = 0;	/* Wait until ready again. */
		}
	}
	if (tell_user && rnc->user_callback)
		sci_remote_node_context_notify_user(rnc);
}

static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* Terminate outstanding requests pending abort. */
	sci_remote_device_abort_requests_pending_abort(idev);

	wake_up(&ihost->eventq);
	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_await_suspend_state_exit(
	struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc
		= container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);

	if (dev_is_sata(idev->domain_dev))
		isci_dev_set_hang_detection_timeout(idev, 0);
}

static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};

void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
				       u16 remote_node_index)
{
	memset(rnc, 0, sizeof(struct sci_remote_node_context));

	rnc->remote_node_index = remote_node_index;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}

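/*
 * Process an SCU hardware event/completion code targeted at this remote node
 * context and advance the RNC state machine accordingly (post complete,
 * invalidate complete, suspend notifications, and so on).
 */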
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
						      u32 event_code)
{
	enum scis_sds_remote_node_context_states state;
	u32 next_state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_POSTING:
		switch (scu_get_event_code(event_code)) {
		case SCU_EVENT_POST_RNC_COMPLETE:
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_INVALIDATING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
			if (sci_rnc->destination_state == RNC_DEST_FINAL)
				next_state = SCI_RNC_INITIAL;
			else
				next_state = SCI_RNC_POSTING;
			sci_change_state(&sci_rnc->sm, next_state);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being invalidated anyway */
				dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					"%s: SCIC Remote Node Context 0x%p was "
					"suspended by hardware while being "
					"invalidated.\n", __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_RESUMING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being resumed anyway */
				dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					"%s: SCIC Remote Node Context 0x%p was "
					"suspended by hardware while being resumed.\n",
					__func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_READY:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			next_state = SCI_RNC_TX_SUSPENDED;
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			next_state = SCI_RNC_TX_RX_SUSPENDED;
			break;
		default:
			goto out;
		}
		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
			sci_change_state(&sci_rnc->sm, next_state);
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	return SCI_SUCCESS;

 out:
	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
		 "%s: code: %#x state: %s\n", __func__, event_code,
		 rnc_state_name(state));
	return SCI_FAILURE;
}

enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
						 scics_sds_remote_node_context_callback cb_fn,
						 void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		return SCI_SUCCESS;
	case SCI_RNC_INITIAL:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		/* We have decided that the destruct request on the remote node
		 * context cannot fail since it is either already in the
		 * initial/destroyed state or can be destroyed.
		 */
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %d, current suspend_type %x dest state %d,"
		" arg suspend_reason %d, arg suspend_type %x",
		__func__, state, sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason == SCI_HW_SUSPEND) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_RESUMING:
		break;	/* The RNC has been posted, so start the suspend. */
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to AWAIT - this signals the
		 * entry into the SCI_RNC_READY state that a suspension
		 * needs to be done immediately.
		 */
		sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		sci_rnc->suspend_type = suspend_type;
		sci_rnc->suspend_reason = suspend_reason;
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->suspend_type = suspend_type;
	sci_rnc->suspend_reason = suspend_reason;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
			isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}

enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
					       scics_sds_remote_node_context_callback cb_fn,
					       void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INITIAL:
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);
		sci_remote_node_context_construct_buffer(sci_rnc);
		sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* We are still waiting to post when a resume was requested. */
		switch (sci_rnc->destination_state) {
		case RNC_DEST_SUSPENDED:
		case RNC_DEST_SUSPENDED_RESUME:
			/* Previously waiting to suspend after posting.
			 * Now continue onto resumption.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
			break;
		default:
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_READY);
			break;
		}
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED: {
		struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
		struct domain_device *dev = idev->domain_dev;

		/* If this is an expander attached SATA device we must
		 * invalidate and repost the RNC since this is the only way
		 * to clear the TCi to NCQ tag mapping table for the RNi.
		 * All other device types we can just resume.
		 */
		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);

		if (dev_is_sata(dev) && dev->parent)
			sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		else
			sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
		return SCI_SUCCESS;
	}
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p,
			RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
						 struct isci_request *ireq)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_READY:
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	default:
		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: invalid state %s\n", __func__,
			rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_remote_node_context_start_task(
	struct sci_remote_node_context *sci_rnc,
	struct isci_request *ireq,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
								 cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: resume failed: %d\n", __func__, status);
	return status;
}

int sci_remote_node_context_is_safe_to_abort(
	struct sci_remote_node_context *sci_rnc)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_TX_RX_SUSPENDED:
		return 1;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
	case SCI_RNC_INITIAL:
		return 0;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %d\n", __func__, state);
		return 0;
	}
}