remote_node_context.c

/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"

#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
	static const char * const strings[] = RNC_STATES;

	if (state >= ARRAY_SIZE(strings))
		return "UNKNOWN";

	return strings[state];
}
#undef C
/**
 * sci_remote_node_context_is_ready() - check for the ready state
 * @sci_rnc: The remote node context object to check.
 *
 * Return: true if the remote node context is in the ready state,
 * false otherwise.
 */
bool sci_remote_node_context_is_ready(
	struct sci_remote_node_context *sci_rnc)
{
	u32 current_state = sci_rnc->sm.current_state_id;

	if (current_state == SCI_RNC_READY) {
		return true;
	}

	return false;
}
bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
{
	u32 current_state = sci_rnc->sm.current_state_id;

	if (current_state == SCI_RNC_TX_RX_SUSPENDED)
		return true;
	return false;
}
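
/* Look up the hardware remote node context buffer for @id; returns NULL
 * if the id is out of range or no device occupies that remote node entry.
 */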
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
	if (id < ihost->remote_node_entries &&
	    ihost->device_table[id])
		return &ihost->remote_node_context_table[id];

	return NULL;
}
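
/* Fill in the remote node context buffer in controller RAM for this
 * device: node index, port width, SAS address, connection timeouts, and
 * the open address frame parameters used when opening a connection.
 */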
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	rnc = sci_rnc_by_id(ihost, rni);

	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = false;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;

	rnc->ssp.arbitration_wait_time = 0;

	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}
/**
 * sci_remote_node_context_setup_to_resume() - arm a transition to ready
 * @sci_rnc: The remote node context to set up.
 * @callback: The function to invoke once the ready state is reached.
 * @callback_parameter: Cookie passed back to @callback.
 * @dest_param: The destination state to record.
 *
 * Set up the remote node context object so that it will transition to
 * its ready state.  If the remote node context is already set up to
 * transition to its final state, this function does nothing.
 */
static void sci_remote_node_context_setup_to_resume(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter,
	enum sci_remote_node_context_destination_state dest_param)
{
	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
		sci_rnc->destination_state = dest_param;
		if (callback != NULL) {
			sci_rnc->user_callback = callback;
			sci_rnc->user_cookie = callback_parameter;
		}
	}
}
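
/* Record the user callback and mark the destination state as final so the
 * context is destroyed once invalidation completes, then wake any waiters
 * on the host event queue.
 */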
static void sci_remote_node_context_setup_to_destroy(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter)
{
	struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));

	sci_rnc->destination_state = RNC_DEST_FINAL;
	sci_rnc->user_callback = callback;
	sci_rnc->user_cookie = callback_parameter;

	wake_up(&ihost->eventq);
}
/**
 * sci_remote_node_context_notify_user() - invoke and clear the callback
 * @rnc: The remote node context whose user callback should fire.
 *
 * Call the registered user callback function, if any, and then reset the
 * callback and its cookie.
 */
static void sci_remote_node_context_notify_user(
	struct sci_remote_node_context *rnc)
{
	if (rnc->user_callback != NULL) {
		(*rnc->user_callback)(rnc->user_cookie);

		rnc->user_callback = NULL;
		rnc->user_cookie = NULL;
	}
}
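
/* Continue a queued state transition based on the recorded destination
 * state: resume toward ready (or toward final destruction), or clear the
 * destination if nothing further was requested.
 */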
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
	switch (rnc->destination_state) {
	case RNC_DEST_READY:
	case RNC_DEST_SUSPENDED_RESUME:
		rnc->destination_state = RNC_DEST_READY;
		/* Fall through... */
	case RNC_DEST_FINAL:
		sci_remote_node_context_resume(rnc, rnc->user_callback,
					       rnc->user_cookie);
		break;
	default:
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		break;
	}
}
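
/* Mark the context buffer valid and post it to the hardware.  Expander
 * attached SATA devices are posted with SCU_CONTEXT_COMMAND_POST_RNC_96;
 * all other devices use SCU_CONTEXT_COMMAND_POST_RNC_32, and direct
 * attached devices also have the port transport layer set up for this
 * remote node index.
 */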
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = true;

	if (dev_is_sata(dev) && dev->parent) {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

		if (!dev->parent)
			sci_port_setup_transports(idev->owning_port,
						  sci_rnc->remote_node_index);
	}
}
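
/* Clear the valid bit in the context buffer and ask the hardware to
 * invalidate the remote node context.
 */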
static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = false;

	sci_remote_device_post_request(rnc_to_dev(sci_rnc),
				       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}
static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* Check to see if we have gotten back to the initial state because
	 * someone requested to destroy the remote node context object.
	 */
	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		sci_remote_node_context_notify_user(rnc);

		smp_wmb();
		wake_up(&ihost->eventq);
	}
}

static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);

	sci_remote_node_context_validate_context_buffer(sci_rnc);
}

static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Terminate all outstanding requests. */
	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
	sci_remote_node_context_invalidate_context_buffer(rnc);
}

static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev;
	struct domain_device *dev;

	idev = rnc_to_dev(rnc);
	dev = idev->domain_dev;

	/*
	 * For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy and in cases where we
	 * resume because of a target reset we also need to update
	 * the STPTLDARNI register with the RNi of the device
	 */
	if (dev_is_sata(dev) && !dev->parent)
		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);

	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}
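
/* On entry to the ready state, either start a pending software suspension
 * or notify the user that the resume has finished.  When a suspend-then-
 * resume was queued, the notification is deferred until the context is
 * ready again.
 */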
static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	enum sci_remote_node_context_destination_state dest_select;
	int tell_user = 1;

	dest_select = rnc->destination_state;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	if ((dest_select == RNC_DEST_SUSPENDED) ||
	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
		sci_remote_node_context_suspend(
			rnc, rnc->suspend_reason,
			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);

		if (dest_select == RNC_DEST_SUSPENDED_RESUME)
			tell_user = 0;  /* Wait until ready again. */
	}
	if (tell_user)
		sci_remote_node_context_notify_user(rnc);
}

static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	sci_remote_node_context_continue_state_transitions(rnc);
}
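
/* On entry to the TX/RX suspended state, bump the suspend count (skipping
 * zero on wrap), terminate requests that are pending abort, wake waiters,
 * and continue any queued state transition.
 */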
static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 new_count = rnc->suspend_count + 1;

	if (new_count == 0)
		rnc->suspend_count = 1;
	else
		rnc->suspend_count = new_count;
	smp_wmb();

	/* Terminate outstanding requests pending abort. */
	sci_remote_device_abort_requests_pending_abort(idev);

	wake_up(&ihost->eventq);
	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_await_suspend_state_exit(
	struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc
		= container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);

	if (dev_is_sata(idev->domain_dev))
		isci_dev_set_hang_detection_timeout(idev, 0);
}
static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};
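
/* Initialize a remote node context: zero the structure, record the remote
 * node index, and start its state machine in SCI_RNC_INITIAL.
 */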
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
				       u16 remote_node_index)
{
	memset(rnc, 0, sizeof(struct sci_remote_node_context));

	rnc->remote_node_index = remote_node_index;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}
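
/* Handle an SCU event for this remote node context and drive the state
 * machine accordingly (post complete, invalidate complete, resume release,
 * and hardware suspension events).  Returns SCI_SUCCESS when the event was
 * consumed, SCI_FAILURE or SCI_FAILURE_INVALID_STATE otherwise.
 */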
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
						       u32 event_code)
{
	enum scis_sds_remote_node_context_states state;
	u32 next_state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_POSTING:
		switch (scu_get_event_code(event_code)) {
		case SCU_EVENT_POST_RNC_COMPLETE:
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_INVALIDATING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
			if (sci_rnc->destination_state == RNC_DEST_FINAL)
				next_state = SCI_RNC_INITIAL;
			else
				next_state = SCI_RNC_POSTING;
			sci_change_state(&sci_rnc->sm, next_state);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being invalidated anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					 "%s: SCIC Remote Node Context 0x%p was "
					 "suspended by hardware while being "
					 "invalidated.\n", __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_RESUMING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being resumed anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					 "%s: SCIC Remote Node Context 0x%p was "
					 "suspended by hardware while being resumed.\n",
					 __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_READY:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			next_state = SCI_RNC_TX_SUSPENDED;
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			next_state = SCI_RNC_TX_RX_SUSPENDED;
			break;
		default:
			goto out;
		}
		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
			sci_change_state(&sci_rnc->sm, next_state);
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	return SCI_SUCCESS;

 out:
	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
		 "%s: code: %#x state: %s\n", __func__, event_code,
		 rnc_state_name(state));
	return SCI_FAILURE;
}
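
/* Request destruction of the remote node context.  Depending on the
 * current state this either just records the destroy callback, or also
 * moves the state machine to SCI_RNC_INVALIDATING.
 */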
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
						 scics_sds_remote_node_context_callback cb_fn,
						 void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		return SCI_SUCCESS;
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_INITIAL:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		/* We have decided that the destruct request on the remote node context
		 * can not fail since it is either in the initial/destroyed state or it
		 * can be destroyed.
		 */
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
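
/* Suspend the remote node context.  Software suspends post a suspend
 * command to the hardware and move the state machine to
 * SCI_RNC_AWAIT_SUSPENSION; if the context is already in the requested
 * suspended state, waiters are simply woken.
 */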
enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %s, current suspend_type %x dest state %d,"
			" arg suspend_reason %d, arg suspend_type %x",
		__func__, rnc_state_name(state), sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason == SCI_HW_SUSPEND) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_RESUMING:
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to AWAIT - this signals the
		 * entry into the SCI_RNC_READY state that a suspension
		 * needs to be done immediately.
		 */
		if (sci_rnc->destination_state != RNC_DEST_FINAL)
			sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		sci_rnc->suspend_type = suspend_type;
		sci_rnc->suspend_reason = suspend_reason;
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->suspend_type = suspend_type;
	sci_rnc->suspend_reason = suspend_reason;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
			isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}
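
/* Resume the remote node context toward the ready state, recording the
 * user callback.  Expander attached SATA devices are invalidated and
 * reposted rather than resumed directly, since that is the only way to
 * clear the TCi to NCQ tag mapping for the RNi.
 */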
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
					       scics_sds_remote_node_context_callback cb_fn,
					       void *cb_p)
{
	enum scis_sds_remote_node_context_states state;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);

	state = sci_rnc->sm.current_state_id;
	dev_dbg(scirdev_to_dev(idev),
		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
			"dev resume path %s\n",
		__func__, rnc_state_name(state), cb_fn, cb_p,
		sci_rnc->destination_state,
		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
			? "<abort active>" : "<normal>");

	switch (state) {
	case SCI_RNC_INITIAL:
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);
		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
			sci_remote_node_context_construct_buffer(sci_rnc);
			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		}
		return SCI_SUCCESS;

	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* We are still waiting to post when a resume was
		 * requested.
		 */
		switch (sci_rnc->destination_state) {
		case RNC_DEST_SUSPENDED:
		case RNC_DEST_SUSPENDED_RESUME:
			/* Previously waiting to suspend after posting.
			 * Now continue onto resumption.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
			break;
		default:
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_READY);
			break;
		}
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		{
			struct domain_device *dev = idev->domain_dev;

			/* If this is an expander attached SATA device we must
			 * invalidate and repost the RNC since this is the only
			 * way to clear the TCi to NCQ tag mapping table for
			 * the RNi.  All other device types we can just resume.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p, RNC_DEST_READY);

			if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
				if ((dev_is_sata(dev) && dev->parent) ||
				    (sci_rnc->destination_state == RNC_DEST_FINAL))
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_INVALIDATING);
				else
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_RESUMING);
			}
		}
		return SCI_SUCCESS;

	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
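
/* Check whether an I/O request may be started on this remote node
 * context: only the ready state allows new I/O; suspended states require
 * a remote device reset first.
 */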
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
						 struct isci_request *ireq)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_READY:
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	default:
		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: invalid state %s\n", __func__,
			rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
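
/* Prepare the remote node context to start a task management request by
 * resuming it, warning if the resume fails.
 */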
enum sci_status sci_remote_node_context_start_task(
	struct sci_remote_node_context *sci_rnc,
	struct isci_request *ireq,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
								 cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: resume failed: %d\n", __func__, status);
	return status;
}
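
/* Report whether outstanding requests can safely be aborted: only the
 * invalidating and TX/RX suspended states qualify.
 */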
int sci_remote_node_context_is_safe_to_abort(
	struct sci_remote_node_context *sci_rnc)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_TX_RX_SUSPENDED:
		return 1;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
	case SCI_RNC_INITIAL:
		return 0;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %d\n", __func__, state);
		return 0;
	}
}