sas_init.c

/*
 * Serial Attached SCSI (SAS) Transport Layer initialization
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>

#include "sas_internal.h"
#include "../scsi_sas_internal.h"

static struct kmem_cache *sas_task_cache;
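
/*
 * Allocate a zeroed sas_task from the slab cache.  The task starts out
 * in the SAS_TASK_STATE_PENDING state with its list head and state lock
 * initialized.  Returns NULL on allocation failure.
 */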
struct sas_task *sas_alloc_task(gfp_t flags)
{
	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);

	if (task) {
		INIT_LIST_HEAD(&task->list);
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
	}

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_task);
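
/*
 * Allocate a sas_task with an attached sas_task_slow (timer plus
 * completion), for commands that are executed synchronously and may
 * need to be waited on or timed out.  Both allocations must succeed
 * or the whole call fails with NULL.
 */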
struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
	struct sas_task *task = sas_alloc_task(flags);
	struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);

	if (!task || !slow) {
		if (task)
			kmem_cache_free(sas_task_cache, task);
		kfree(slow);
		return NULL;
	}

	task->slow_task = slow;
	init_timer(&slow->timer);
	init_completion(&slow->completion);

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
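
/*
 * Release a task allocated by sas_alloc_task() or sas_alloc_slow_task().
 * The task must already have been removed from any list it was on.
 */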
void sas_free_task(struct sas_task *task)
{
	if (task) {
		BUG_ON(!list_empty(&task->list));
		kfree(task->slow_task);
		kmem_cache_free(sas_task_cache, task);
	}
}
EXPORT_SYMBOL_GPL(sas_free_task);

/*------------ SAS addr hash -----------*/
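
/*
 * Hash an 8-byte SAS address down to a 3-byte hashed address: the
 * address bits are fed MSB-first through a 24-bit CRC-style register
 * using the generator polynomial 0x00DB2777, and the result is stored
 * big-endian in hashed[0..2].
 */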
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
	const u32 poly = 0x00DB2777;
	u32 r = 0;
	int i;

	for (i = 0; i < 8; i++) {
		int b;

		for (b = 7; b >= 0; b--) {
			r <<= 1;
			if ((1 << b) & sas_addr[i]) {
				if (!(r & 0x01000000))
					r ^= poly;
			} else if (r & 0x01000000)
				r ^= poly;
		}
	}

	hashed[0] = (r >> 16) & 0xFF;
	hashed[1] = (r >> 8) & 0xFF;
	hashed[2] = r & 0xFF;
}

/* ---------- HA events ---------- */

void sas_hae_reset(struct work_struct *work)
{
	struct sas_ha_event *ev = to_sas_ha_event(work);
	struct sas_ha_struct *ha = ev->ha;

	clear_bit(HAE_RESET, &ha->pending);
}
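
/*
 * Register a SAS host adapter with libsas: initialize its locks, lists
 * and hashed SAS address, then bring up phys, ports and the event
 * machinery.  On failure, anything already registered is torn back down
 * and the error is returned.
 */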
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
	int error = 0;

	mutex_init(&sas_ha->disco_mutex);
	spin_lock_init(&sas_ha->phy_port_lock);
	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

	if (sas_ha->lldd_queue_size == 0)
		sas_ha->lldd_queue_size = 1;
	else if (sas_ha->lldd_queue_size == -1)
		sas_ha->lldd_queue_size = 128; /* Sanity */

	set_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_lock_init(&sas_ha->lock);
	mutex_init(&sas_ha->drain_mutex);
	init_waitqueue_head(&sas_ha->eh_wait_q);
	INIT_LIST_HEAD(&sas_ha->defer_q);
	INIT_LIST_HEAD(&sas_ha->eh_dev_q);

	error = sas_register_phys(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
		return error;
	}

	error = sas_register_ports(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
		goto Undo_phys;
	}

	error = sas_init_events(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
		goto Undo_ports;
	}

	if (sas_ha->lldd_max_execute_num > 1) {
		error = sas_init_queue(sas_ha);
		if (error) {
			printk(KERN_NOTICE "couldn't start queue thread:%d, "
			       "running in direct mode\n", error);
			sas_ha->lldd_max_execute_num = 1;
		}
	}

	INIT_LIST_HEAD(&sas_ha->eh_done_q);
	INIT_LIST_HEAD(&sas_ha->eh_ata_q);

	return 0;

Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:
	return error;
}

static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
	/* Set the state to unregistered to prevent further unchained
	 * events from being queued, and flush any in-progress drainers.
	 */
	mutex_lock(&sas_ha->drain_mutex);
	spin_lock_irq(&sas_ha->lock);
	clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_unlock_irq(&sas_ha->lock);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);
}
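
/*
 * Unregister a SAS host adapter: stop event processing, unregister its
 * ports, flush any remaining unregistration work, and shut down the
 * command queue thread if one was running.
 */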
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
	sas_disable_events(sas_ha);
	sas_unregister_ports(sas_ha);

	/* flush unregistration work */
	mutex_lock(&sas_ha->drain_mutex);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);

	if (sas_ha->lldd_max_execute_num > 1) {
		sas_shutdown_queue(sas_ha);
		sas_ha->lldd_max_execute_num = 1;
	}

	return 0;
}
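
/*
 * Fetch phy error counters: a phy local to the host adapter is queried
 * through the LLDD's lldd_control_phy(PHY_FUNC_GET_EVENTS) hook, while
 * an expander phy is queried over SMP via sas_smp_get_phy_events().
 */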
static int sas_get_linkerrors(struct sas_phy *phy)
{
	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
	}

	return sas_smp_get_phy_events(phy);
}
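
/*
 * If the device behind this phy is a SATA device that has already been
 * probed, hand the reset to libata error handling and wait for it to
 * finish; otherwise return -ENODEV so the caller falls back to a plain
 * phy control reset.
 */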
int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
	struct domain_device *dev = NULL;

	/* try to route user requested link resets through libata */
	if (asd_phy->port)
		dev = asd_phy->port->port_dev;

	/* validate that dev has been probed */
	if (dev)
		dev = sas_find_dev_by_rphy(dev->rphy);

	if (dev && dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		sas_ata_wait_eh(dev);
		return 0;
	}

	return -ENODEV;
}

/**
 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
 *
 * The phy reset request arrives via sysfs and is handled in host
 * workqueue context, so we know we can block on eh and safely traverse
 * the domain_device topology.
 */
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	enum phy_func reset_type;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
			return 0;
		return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);

		if (ata_dev && !hard_reset) {
			sas_ata_schedule_reset(ata_dev);
			sas_ata_wait_eh(ata_dev);
			return 0;
		} else
			return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
}
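
/*
 * Enable or disable a phy.  Disabling goes straight to the LLDD (local
 * phy) or to SMP phy control (expander phy); enabling is implemented as
 * a link reset routed through transport_sas_phy_reset() so that SATA
 * links are handed to libata.
 */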
static int sas_phy_enable(struct sas_phy *phy, int enable)
{
	int ret;
	enum phy_func cmd;

	if (enable)
		cmd = PHY_FUNC_LINK_RESET;
	else
		cmd = PHY_FUNC_DISABLE;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
	}
	return ret;
}
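
/*
 * Issue a link or hard reset directly on a phy, without the libata
 * coordination done by transport_sas_phy_reset().  Returns -ENODEV if
 * the phy has been disabled; sysfs-initiated resets instead go through
 * queue_phy_reset() below.
 */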
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	int ret;
	enum phy_func reset_type;

	if (!phy->enabled)
		return -ENODEV;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
	return ret;
}
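
/*
 * Program new minimum/maximum link rates on a phy.  The requested rates
 * are sanity-checked against the phy's current settings and clamped to
 * its hardware limits, then applied either through the LLDD (local phy)
 * or via SMP phy control with a link reset (expander phy).
 */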
int sas_set_phy_speed(struct sas_phy *phy,
		      struct sas_phy_linkrates *rates)
{
	int ret;

	if ((rates->minimum_linkrate &&
	     rates->minimum_linkrate > phy->maximum_linkrate) ||
	    (rates->maximum_linkrate &&
	     rates->maximum_linkrate < phy->minimum_linkrate))
		return -EINVAL;

	if (rates->minimum_linkrate &&
	    rates->minimum_linkrate < phy->minimum_linkrate_hw)
		rates->minimum_linkrate = phy->minimum_linkrate_hw;

	if (rates->maximum_linkrate &&
	    rates->maximum_linkrate > phy->maximum_linkrate_hw)
		rates->maximum_linkrate = phy->maximum_linkrate_hw;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
					       rates);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		ret = sas_smp_phy_control(ddev, phy->number,
					  PHY_FUNC_LINK_RESET, rates);
	}

	return ret;
}

void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
	int i;

	set_bit(SAS_HA_REGISTERED, &ha->state);

	/* clear out any stale link events/data from the suspension path */
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
		phy->port_events_pending = 0;
		phy->phy_events_pending = 0;
		phy->frame_rcvd_size = 0;
	}
}
EXPORT_SYMBOL(sas_prep_resume_ha);

static int phys_suspended(struct sas_ha_struct *ha)
{
	int i, rc = 0;

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended)
			rc++;
	}

	return rc;
}
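
/*
 * Resume a previously suspended host adapter: wait up to 25 seconds for
 * all phys to report back, post PHYE_RESUME_TIMEOUT for any that did
 * not, then unblock I/O and drain the resulting libsas work.
 */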
void sas_resume_ha(struct sas_ha_struct *ha)
{
	const unsigned long tmo = msecs_to_jiffies(25000);
	int i;

	/* deform ports on phys that did not resume.
	 * At this point we may be racing the phy coming back (as posted
	 * by the lldd).  So we post the event and once we are in the
	 * libsas context check that the phy remains suspended before
	 * tearing it down.
	 */
	i = phys_suspended(ha);
	if (i)
		dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
			 i, i > 1 ? "s" : "");
	wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended) {
			dev_warn(&phy->phy->dev, "resume timeout\n");
			sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
		}
	}

	/* all phys are back up or timed out, turn on i/o so we can
	 * flush out disks that did not return
	 */
	scsi_unblock_requests(ha->core.shost);
	sas_drain_work(ha);
}
EXPORT_SYMBOL(sas_resume_ha);
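
/*
 * Suspend a host adapter: stop event processing, block further I/O,
 * queue a DISCE_SUSPEND discovery event for every port and flush the
 * resulting work before returning.
 */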
void sas_suspend_ha(struct sas_ha_struct *ha)
{
	int i;

	sas_disable_events(ha);
	scsi_block_requests(ha->core.shost);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];

		sas_discover_event(port, DISCE_SUSPEND);
	}

	/* flush suspend events while unregistered */
	mutex_lock(&ha->drain_mutex);
	__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);

static void sas_phy_release(struct sas_phy *phy)
{
	kfree(phy->hostdata);
	phy->hostdata = NULL;
}

static void phy_reset_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);

	d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}

static void phy_enable_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);

	d->enable_result = sas_phy_enable(d->phy, d->enable);
}
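
/*
 * Per-phy setup hook for the SAS transport class: allocate the
 * sas_phy_data bookkeeping that queue_phy_reset()/queue_phy_enable()
 * use to run phy operations from the libsas workqueue, and stash it in
 * phy->hostdata (freed again in sas_phy_release()).
 */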
static int sas_phy_setup(struct sas_phy *phy)
{
	struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	mutex_init(&d->event_lock);
	INIT_SAS_WORK(&d->reset_work, phy_reset_work);
	INIT_SAS_WORK(&d->enable_work, phy_enable_work);
	d->phy = phy;
	phy->hostdata = d;

	return 0;
}
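
/*
 * Transport-class phy_reset entry point: queue the reset onto the
 * libsas workqueue so it is serialized with discovery and ata-eh,
 * drain the queue, and report the result.
 */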
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->reset_result = 0;
	d->hard_reset = hard_reset;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->reset_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->reset_result;
	mutex_unlock(&d->event_lock);

	return rc;
}
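
/*
 * Transport-class phy_enable entry point: same pattern as
 * queue_phy_reset(), but dispatching phy_enable_work to enable or
 * disable the phy.
 */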
static int queue_phy_enable(struct sas_phy *phy, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->enable_result = 0;
	d->enable = enable;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->enable_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->enable_result;
	mutex_unlock(&d->event_lock);

	return rc;
}

static struct sas_function_template sft = {
	.phy_enable = queue_phy_enable,
	.phy_reset = queue_phy_reset,
	.phy_setup = sas_phy_setup,
	.phy_release = sas_phy_release,
	.set_phy_speed = sas_set_phy_speed,
	.get_linkerrors = sas_get_linkerrors,
	.smp_handler = sas_smp_handler,
};
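
/*
 * Combine the libsas function template above with the LLDD's domain
 * function template and register with the SAS transport class.  The
 * returned scsi_transport_template carries the libsas error handling
 * and timeout hooks and is typically installed by the LLDD as
 * shost->transportt.
 */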
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
	struct scsi_transport_template *stt = sas_attach_transport(&sft);
	struct sas_internal *i;

	if (!stt)
		return stt;

	i = to_sas_internal(stt);
	i->dft = dft;
	stt->create_work_queue = 1;
	stt->eh_timed_out = sas_scsi_timed_out;
	stt->eh_strategy_handler = sas_scsi_recover_host;

	return stt;
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);

void sas_domain_release_transport(struct scsi_transport_template *stt)
{
	sas_release_transport(stt);
}
EXPORT_SYMBOL_GPL(sas_domain_release_transport);

/* ---------- SAS Class register/unregister ---------- */

static int __init sas_class_init(void)
{
	sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
	if (!sas_task_cache)
		return -ENOMEM;

	return 0;
}

static void __exit sas_class_exit(void)
{
	kmem_cache_destroy(sas_task_cache);
}

MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");

module_init(sas_class_init);
module_exit(sas_class_exit);

EXPORT_SYMBOL_GPL(sas_register_ha);
EXPORT_SYMBOL_GPL(sas_unregister_ha);