core.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651
  1. /*
  2. * Filename: core.c
  3. *
  4. *
  5. * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
  6. * Philip Kelleher <pjk1939@linux.vnet.ibm.com>
  7. *
  8. * (C) Copyright 2013 IBM Corporation
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License as
  12. * published by the Free Software Foundation; either version 2 of the
  13. * License, or (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software Foundation,
  22. * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  23. */
  24. #include <linux/kernel.h>
  25. #include <linux/init.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/module.h>
  28. #include <linux/pci.h>
  29. #include <linux/reboot.h>
  30. #include <linux/slab.h>
  31. #include <linux/bitops.h>
  32. #include <linux/genhd.h>
  33. #include <linux/idr.h>
  34. #include "rsxx_priv.h"
  35. #include "rsxx_cfg.h"
/* force_legacy == NO_LEGACY means "use MSI when the card supports it". */
#define NO_LEGACY 0

MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
MODULE_AUTHOR("IBM <support@ramsan.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);

/* Module parameter: non-zero forces legacy INTx interrupts instead of MSI. */
static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");

/* Allocator of unique per-card disk ids; rsxx_ida_lock serializes access. */
static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);
  46. /*----------------- Interrupt Control & Handling -------------------*/
  47. static void __enable_intr(unsigned int *mask, unsigned int intr)
  48. {
  49. *mask |= intr;
  50. }
  51. static void __disable_intr(unsigned int *mask, unsigned int intr)
  52. {
  53. *mask &= ~intr;
  54. }
  55. /*
  56. * NOTE: Disabling the IER will disable the hardware interrupt.
  57. * Disabling the ISR will disable the software handling of the ISR bit.
  58. *
  59. * Enable/Disable interrupt functions assume the card->irq_lock
  60. * is held by the caller.
  61. */
/*
 * Enable @intr in the hardware interrupt-enable register (IER).
 * No-op once the card is halted. Caller must hold card->irq_lock.
 */
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	if (unlikely(card->halt))
		return;

	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}
/*
 * Disable @intr in the hardware interrupt-enable register (IER).
 * Caller must hold card->irq_lock.
 */
void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}
/*
 * Enable @intr in both the hardware IER and the software ISR mask
 * (the latter gates handling in rsxx_isr()). No-op once the card is
 * halted. Caller must hold card->irq_lock.
 */
void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
			     unsigned int intr)
{
	if (unlikely(card->halt))
		return;

	__enable_intr(&card->isr_mask, intr);
	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}
/*
 * Disable @intr in both the hardware IER and the software ISR mask.
 * Caller must hold card->irq_lock.
 */
void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
			      unsigned int intr)
{
	__disable_intr(&card->isr_mask, intr);
	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}
/*
 * Top-level interrupt handler. Reads the ISR, dispatches DMA-completion
 * work per target, creg-completion work, and card-event work. If a DMA
 * interrupt had to be masked, the ISR is re-read once more in case new
 * bits latched while the mask was being updated.
 */
irqreturn_t rsxx_isr(int irq, void *pdata)
{
	struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) pdata;
	unsigned int isr;
	int handled = 0;
	int reread_isr;
	int i;

	spin_lock(&card->irq_lock);

	do {
		reread_isr = 0;

		isr = ioread32(card->regmap + ISR);
		if (isr == 0xffffffff) {
			/*
			 * A few systems seem to have an intermittent issue
			 * where PCI reads return all Fs, but retrying the read
			 * a little later will return as expected.
			 */
			dev_info(CARD_TO_DEV(card),
				"ISR = 0xFFFFFFFF, retrying later\n");
			break;
		}

		/* Only handle sources we have software-enabled. */
		isr &= card->isr_mask;
		if (!isr)
			break;

		for (i = 0; i < card->n_targets; i++) {
			if (isr & CR_INTR_DMA(i)) {
				if (card->ier_mask & CR_INTR_DMA(i)) {
					rsxx_disable_ier(card, CR_INTR_DMA(i));
					reread_isr = 1;
				}
				queue_work(card->ctrl[i].done_wq,
					   &card->ctrl[i].dma_done_work);
				handled++;
			}
		}

		if (isr & CR_INTR_CREG) {
			schedule_work(&card->creg_ctrl.done_work);
			handled++;
		}

		if (isr & CR_INTR_EVENT) {
			schedule_work(&card->event_work);
			/*
			 * Masked here; card_event_handler() re-enables it
			 * once it runs.
			 */
			rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
			handled++;
		}
	} while (reread_isr);

	spin_unlock(&card->irq_lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
  138. /*----------------- Card Event Handler -------------------*/
/*
 * React to a reported hardware state change: record the new state and
 * size the gendisk accordingly — real capacity for GOOD (and for
 * RD_ONLY_FAULT, so users can still read their data off), zero for
 * every non-operational state.
 */
static void card_state_change(struct rsxx_cardinfo *card,
			      unsigned int new_state)
{
	int st;

	dev_info(CARD_TO_DEV(card),
		"card state change detected.(%s -> %s)\n",
		rsxx_card_state_to_str(card->state),
		rsxx_card_state_to_str(new_state));

	card->state = new_state;

	/* Don't attach DMA interfaces if the card has an invalid config */
	if (!card->config_valid)
		return;

	switch (new_state) {
	case CARD_STATE_RD_ONLY_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware has entered read-only mode!\n");
		/*
		 * Fall through so the DMA devices can be attached and
		 * the user can attempt to pull off their data.
		 */
	case CARD_STATE_GOOD:
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			dev_err(CARD_TO_DEV(card),
				"Failed attaching DMA devices\n");

		/* size8 is in bytes; set_capacity() wants 512-byte sectors. */
		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		break;

	case CARD_STATE_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware Fault reported!\n");
		/* Fall through. */

	/* Everything else, detach DMA interface if it's attached. */
	case CARD_STATE_SHUTDOWN:
	case CARD_STATE_STARTING:
	case CARD_STATE_FORMATTING:
	case CARD_STATE_UNINITIALIZED:
	case CARD_STATE_SHUTTING_DOWN:
	/*
	 * dStroy is a term coined by marketing to represent the low level
	 * secure erase.
	 */
	case CARD_STATE_DSTROYING:
		set_capacity(card->gendisk, 0);
		break;
	}
}
/*
 * Deferred handler for CR_INTR_EVENT (queued from rsxx_isr()): re-arms
 * the event interrupt, reads the new card state, applies any state
 * change, and drains the hardware log if one is pending.
 */
static void card_event_handler(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	unsigned int state;
	unsigned long flags;
	int st;

	card = container_of(work, struct rsxx_cardinfo, event_work);

	/* Teardown in progress — don't touch the hardware. */
	if (unlikely(card->halt))
		return;

	/*
	 * Enable the interrupt now to avoid any weird race conditions where a
	 * state change might occur while rsxx_get_card_state() is
	 * processing a returned creg cmd.
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_get_card_state(card, &state);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed reading state after event.\n");
		return;
	}

	if (card->state != state)
		card_state_change(card, state);

	if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
  214. char *rsxx_card_state_to_str(unsigned int state)
  215. {
  216. static char *state_strings[] = {
  217. "Unknown", "Shutdown", "Starting", "Formatting",
  218. "Uninitialized", "Good", "Shutting Down",
  219. "Fault", "Read Only Fault", "dStroying"
  220. };
  221. return state_strings[ffs(state)];
  222. }
  223. /*----------------- Card Operations -------------------*/
/*
 * Command the card to shut down and wait for it to settle.
 *
 * Phase 1: wait (up to 120 s) for the card to leave STARTING — a
 * shutdown cannot be issued during a transition state. Phase 2: issue
 * CARD_CMD_SHUTDOWN if not already shutting down, then wait (up to
 * 120 s) for SHUTDOWN. Returns 0 on success, -ETIMEDOUT if the card
 * never settles, or the error from reading the card state.
 *
 * NOTE(review): the timeout checks use open-coded "jiffies - start <
 * timeout" instead of time_before(); confirm jiffies wrap-around is
 * acceptable here.
 */
static int card_shutdown(struct rsxx_cardinfo *card)
{
	unsigned int state;
	signed long start;
	const int timeout = msecs_to_jiffies(120000);
	int st;

	/* We can't issue a shutdown if the card is in a transition state */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state == CARD_STATE_STARTING &&
		 (jiffies - start < timeout));

	if (state == CARD_STATE_STARTING)
		return -ETIMEDOUT;

	/* Only issue a shutdown if we need to */
	if ((state != CARD_STATE_SHUTTING_DOWN) &&
	    (state != CARD_STATE_SHUTDOWN)) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
		if (st)
			return st;
	}

	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state != CARD_STATE_SHUTDOWN &&
		 (jiffies - start < timeout));

	if (state != CARD_STATE_SHUTDOWN)
		return -ETIMEDOUT;

	return 0;
}
  258. /*----------------- Driver Initialization & Setup -------------------*/
  259. /* Returns: 0 if the driver is compatible with the device
  260. -1 if the driver is NOT compatible with the device */
  261. static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
  262. {
  263. unsigned char pci_rev;
  264. pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
  265. if (pci_rev > RS70_PCI_REV_SUPPORTED)
  266. return -1;
  267. return 0;
  268. }
/*
 * PCI probe: bring a discovered card fully online.
 *
 * Order matters: allocate cardinfo and a disk id; enable the PCI
 * device, DMA mask and BAR0 mapping; mask all interrupts; set up MSI
 * (unless force_legacy) and the IRQ handler; bring up the creg command
 * interface; then the DMA engine and block device; finally enable the
 * event interrupt (which kicks off actions in those layers) and attach
 * the disk. Errors unwind in reverse via the goto ladder below.
 */
static int rsxx_pci_probe(struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	struct rsxx_cardinfo *card;
	unsigned long flags;
	int st;

	dev_info(&dev->dev, "PCI-Flash SSD discovered\n");

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->dev = dev;
	pci_set_drvdata(dev, card);

	/* ida_get_new() can fail with -EAGAIN after a pre_get; just retry. */
	do {
		if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
			st = -ENOMEM;
			goto failed_ida_get;
		}

		spin_lock(&rsxx_ida_lock);
		st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
		spin_unlock(&rsxx_ida_lock);
	} while (st == -EAGAIN);

	if (st)
		goto failed_ida_get;

	st = pci_enable_device(dev);
	if (st)
		goto failed_enable;

	pci_set_master(dev);
	pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);

	st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"No usable DMA configuration,aborting\n");
		goto failed_dma_mask;
	}

	st = pci_request_regions(dev, DRIVER_NAME);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed to request memory region\n");
		goto failed_request_regions;
	}

	if (pci_resource_len(dev, 0) == 0) {
		dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	card->regmap = pci_iomap(dev, 0, 0);
	if (!card->regmap) {
		dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	spin_lock_init(&card->irq_lock);
	card->halt = 0;

	/* Start with everything masked until the handlers are in place. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* MSI failure is non-fatal; we fall back to legacy interrupts. */
	if (!force_legacy) {
		st = pci_enable_msi(dev);
		if (st)
			dev_warn(CARD_TO_DEV(card),
				"Failed to enable MSI\n");
	}

	st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
			 DRIVER_NAME, card);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed requesting IRQ%d\n", dev->irq);
		goto failed_irq;
	}

	/************* Setup Processor Command Interface *************/
	rsxx_creg_setup(card);

	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_compatibility_check(card);
	if (st) {
		dev_warn(CARD_TO_DEV(card),
			"Incompatible driver detected. Please update the driver.\n");
		st = -EINVAL;
		goto failed_compatiblity_check;
	}

	/************* Load Card Config *************/
	/* Config errors are reported but not fatal (config_valid gates use). */
	st = rsxx_load_config(card);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed loading card config\n");

	/************* Setup DMA Engine *************/
	st = rsxx_get_num_targets(card, &card->n_targets);
	if (st)
		dev_info(CARD_TO_DEV(card),
			"Failed reading the number of DMA targets\n");

	card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
	if (!card->ctrl) {
		st = -ENOMEM;
		goto failed_dma_setup;
	}

	st = rsxx_dma_setup(card);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed to setup DMA engine\n");
		goto failed_dma_setup;
	}

	/************* Setup Card Event Handler *************/
	INIT_WORK(&card->event_work, card_event_handler);

	st = rsxx_setup_dev(card);
	if (st)
		goto failed_create_dev;

	rsxx_get_card_state(card, &card->state);

	dev_info(CARD_TO_DEV(card),
		"card state: %s\n",
		rsxx_card_state_to_str(card->state));

	/*
	 * Now that the DMA Engine and devices have been setup,
	 * we can enable the event interrupt(it kicks off actions in
	 * those layers so we couldn't enable it right away.)
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	if (card->state == CARD_STATE_SHUTDOWN) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
		if (st)
			dev_crit(CARD_TO_DEV(card),
				"Failed issuing card startup\n");
	} else if (card->state == CARD_STATE_GOOD ||
		   card->state == CARD_STATE_RD_ONLY_FAULT) {
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			card->size8 = 0;
	}

	rsxx_attach_dev(card);

	return 0;

/* Error unwind: each label undoes everything acquired before its goto. */
failed_create_dev:
	rsxx_dma_destroy(card);
failed_dma_setup:
failed_compatiblity_check:
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	free_irq(dev->irq, card);

	if (!force_legacy)
		pci_disable_msi(dev);
failed_irq:
	pci_iounmap(dev, card->regmap);
failed_iomap:
	pci_release_regions(dev);
failed_request_regions:
failed_dma_mask:
	pci_disable_device(dev);
failed_enable:
	spin_lock(&rsxx_ida_lock);
	ida_remove(&rsxx_disk_ida, card->disk_id);
	spin_unlock(&rsxx_ida_lock);
failed_ida_get:
	kfree(card);

	return st;
}
  426. static void rsxx_pci_remove(struct pci_dev *dev)
  427. {
  428. struct rsxx_cardinfo *card = pci_get_drvdata(dev);
  429. unsigned long flags;
  430. int st;
  431. int i;
  432. if (!card)
  433. return;
  434. dev_info(CARD_TO_DEV(card),
  435. "Removing PCI-Flash SSD.\n");
  436. rsxx_detach_dev(card);
  437. for (i = 0; i < card->n_targets; i++) {
  438. spin_lock_irqsave(&card->irq_lock, flags);
  439. rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
  440. spin_unlock_irqrestore(&card->irq_lock, flags);
  441. }
  442. st = card_shutdown(card);
  443. if (st)
  444. dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
  445. /* Sync outstanding event handlers. */
  446. spin_lock_irqsave(&card->irq_lock, flags);
  447. rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
  448. spin_unlock_irqrestore(&card->irq_lock, flags);
  449. /* Prevent work_structs from re-queuing themselves. */
  450. card->halt = 1;
  451. cancel_work_sync(&card->event_work);
  452. rsxx_destroy_dev(card);
  453. rsxx_dma_destroy(card);
  454. spin_lock_irqsave(&card->irq_lock, flags);
  455. rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
  456. spin_unlock_irqrestore(&card->irq_lock, flags);
  457. free_irq(dev->irq, card);
  458. if (!force_legacy)
  459. pci_disable_msi(dev);
  460. rsxx_creg_destroy(card);
  461. pci_iounmap(dev, card->regmap);
  462. pci_disable_device(dev);
  463. pci_release_regions(dev);
  464. kfree(card);
  465. }
  466. static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
  467. {
  468. /* We don't support suspend at this time. */
  469. return -ENOSYS;
  470. }
/*
 * System shutdown/reboot hook: detach the block device, mask per-target
 * DMA interrupts and ask the card to shut down cleanly. Unlike
 * rsxx_pci_remove(), no resources are released — the machine is going
 * down anyway.
 */
static void rsxx_pci_shutdown(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	/* Best effort — nothing useful to do with an error at shutdown. */
	card_shutdown(card);
}
/* PCI ids of the IBM/TMS RamSan flash boards this driver binds to. */
static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
	{0,},
};

MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
/* PCI driver glue: hooks probe/remove/suspend/shutdown into the core. */
static struct pci_driver rsxx_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = rsxx_pci_ids,
	.probe = rsxx_pci_probe,
	.remove = rsxx_pci_remove,
	.suspend = rsxx_pci_suspend,
	.shutdown = rsxx_pci_shutdown,
};
  503. static int __init rsxx_core_init(void)
  504. {
  505. int st;
  506. st = rsxx_dev_init();
  507. if (st)
  508. return st;
  509. st = rsxx_dma_init();
  510. if (st)
  511. goto dma_init_failed;
  512. st = rsxx_creg_init();
  513. if (st)
  514. goto creg_init_failed;
  515. return pci_register_driver(&rsxx_pci_driver);
  516. creg_init_failed:
  517. rsxx_dma_cleanup();
  518. dma_init_failed:
  519. rsxx_dev_cleanup();
  520. return st;
  521. }
/*
 * Module exit: unregister the PCI driver first (so no new probes can
 * race the teardown), then clean up subsystems in the reverse order of
 * rsxx_core_init().
 */
static void __exit rsxx_core_cleanup(void)
{
	pci_unregister_driver(&rsxx_pci_driver);
	rsxx_creg_cleanup();
	rsxx_dma_cleanup();
	rsxx_dev_cleanup();
}

module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);