/*
 * Filename: core.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/idr.h>

#include "rsxx_priv.h"
#include "rsxx_cfg.h"
/* Default for force_legacy: 0 = prefer MSI, fall back to legacy INTx. */
#define NO_LEGACY 0

MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
MODULE_AUTHOR("IBM <support@ramsan.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);

/* Read-only (0444) module parameter: non-zero skips pci_enable_msi(). */
static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");

/* Allocator of unique per-card disk IDs; rsxx_ida_lock serializes it. */
static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);
/*----------------- Interrupt Control & Handling -------------------*/
  47. static void __enable_intr(unsigned int *mask, unsigned int intr)
  48. {
  49. *mask |= intr;
  50. }
  51. static void __disable_intr(unsigned int *mask, unsigned int intr)
  52. {
  53. *mask &= ~intr;
  54. }
/*
 * NOTE: Disabling the IER will disable the hardware interrupt.
 * Disabling the ISR will disable the software handling of the ISR bit.
 *
 * Enable/Disable interrupt functions assume the card->irq_lock
 * is held by the caller.
 */
  62. void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
  63. {
  64. if (unlikely(card->halt))
  65. return;
  66. __enable_intr(&card->ier_mask, intr);
  67. iowrite32(card->ier_mask, card->regmap + IER);
  68. }
  69. void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
  70. {
  71. __disable_intr(&card->ier_mask, intr);
  72. iowrite32(card->ier_mask, card->regmap + IER);
  73. }
  74. void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
  75. unsigned int intr)
  76. {
  77. if (unlikely(card->halt))
  78. return;
  79. __enable_intr(&card->isr_mask, intr);
  80. __enable_intr(&card->ier_mask, intr);
  81. iowrite32(card->ier_mask, card->regmap + IER);
  82. }
  83. void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
  84. unsigned int intr)
  85. {
  86. __disable_intr(&card->isr_mask, intr);
  87. __disable_intr(&card->ier_mask, intr);
  88. iowrite32(card->ier_mask, card->regmap + IER);
  89. }
/*
 * Top-level interrupt handler (shared line). Reads the ISR, dispatches
 * per-target DMA completion work, creg completion work, and event work,
 * and loops while any DMA IER bit was disabled mid-pass (a new ISR read
 * is needed to pick up bits raised in the meantime).
 */
static irqreturn_t rsxx_isr(int irq, void *pdata)
{
	struct rsxx_cardinfo *card = pdata;
	unsigned int isr;
	int handled = 0;
	int reread_isr;
	int i;

	spin_lock(&card->irq_lock);

	do {
		reread_isr = 0;

		isr = ioread32(card->regmap + ISR);
		if (isr == 0xffffffff) {
			/*
			 * A few systems seem to have an intermittent issue
			 * where PCI reads return all Fs, but retrying the read
			 * a little later will return as expected.
			 */
			dev_info(CARD_TO_DEV(card),
				"ISR = 0xFFFFFFFF, retrying later\n");
			break;
		}

		/* Only service bits we are currently interested in. */
		isr &= card->isr_mask;
		if (!isr)
			break;

		for (i = 0; i < card->n_targets; i++) {
			if (isr & CR_INTR_DMA(i)) {
				/*
				 * Mask this target's DMA interrupt until its
				 * done-work runs; force another ISR read so
				 * nothing raised meanwhile is lost.
				 */
				if (card->ier_mask & CR_INTR_DMA(i)) {
					rsxx_disable_ier(card, CR_INTR_DMA(i));
					reread_isr = 1;
				}
				queue_work(card->ctrl[i].done_wq,
					&card->ctrl[i].dma_done_work);
				handled++;
			}
		}

		if (isr & CR_INTR_CREG) {
			schedule_work(&card->creg_ctrl.done_work);
			handled++;
		}

		if (isr & CR_INTR_EVENT) {
			/* Event IRQ stays masked until card_event_handler
			 * re-enables it. */
			schedule_work(&card->event_work);
			rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
			handled++;
		}
	} while (reread_isr);

	spin_unlock(&card->irq_lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
/*----------------- Card Event Handler -------------------*/
  139. static char *rsxx_card_state_to_str(unsigned int state)
  140. {
  141. static char *state_strings[] = {
  142. "Unknown", "Shutdown", "Starting", "Formatting",
  143. "Uninitialized", "Good", "Shutting Down",
  144. "Fault", "Read Only Fault", "dStroying"
  145. };
  146. return state_strings[ffs(state)];
  147. }
/*
 * React to a hardware state transition: log it, record the new state,
 * and grow or zero the block device capacity depending on whether the
 * new state allows data access.
 */
static void card_state_change(struct rsxx_cardinfo *card,
			      unsigned int new_state)
{
	int st;

	dev_info(CARD_TO_DEV(card),
		"card state change detected.(%s -> %s)\n",
		rsxx_card_state_to_str(card->state),
		rsxx_card_state_to_str(new_state));

	card->state = new_state;

	/* Don't attach DMA interfaces if the card has an invalid config */
	if (!card->config_valid)
		return;

	switch (new_state) {
	case CARD_STATE_RD_ONLY_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware has entered read-only mode!\n");
		/*
		 * Fall through so the DMA devices can be attached and
		 * the user can attempt to pull off their data.
		 */
	case CARD_STATE_GOOD:
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			dev_err(CARD_TO_DEV(card),
				"Failed attaching DMA devices\n");

		/*
		 * NOTE(review): config_valid was already checked above;
		 * this recheck looks redundant unless rsxx_get_card_size8()
		 * can clear it — verify against the creg code.
		 */
		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		break;

	case CARD_STATE_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware Fault reported!\n");
		/* Fall through. */

	/* Everything else, detach DMA interface if it's attached. */
	case CARD_STATE_SHUTDOWN:
	case CARD_STATE_STARTING:
	case CARD_STATE_FORMATTING:
	case CARD_STATE_UNINITIALIZED:
	case CARD_STATE_SHUTTING_DOWN:
	/*
	 * dStroy is a term coined by marketing to represent the low level
	 * secure erase.
	 */
	case CARD_STATE_DSTROYING:
		/* Capacity 0 makes the disk unusable until state improves. */
		set_capacity(card->gendisk, 0);
		break;
	}
}
/*
 * Workqueue handler for CR_INTR_EVENT (scheduled from rsxx_isr, which
 * also masked the event interrupt). Re-enables the interrupt, reads the
 * current card state, applies any change, and drains the hardware log.
 */
static void card_event_handler(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	unsigned int state;
	unsigned long flags;
	int st;

	card = container_of(work, struct rsxx_cardinfo, event_work);

	/* Card is being torn down; don't touch hardware. */
	if (unlikely(card->halt))
		return;

	/*
	 * Enable the interrupt now to avoid any weird race conditions where a
	 * state change might occur while rsxx_get_card_state() is
	 * processing a returned creg cmd.
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_get_card_state(card, &state);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed reading state after event.\n");
		return;
	}

	if (card->state != state)
		card_state_change(card, state);

	/* Pull the hardware log if the card flagged pending entries. */
	if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
/*----------------- Card Operations -------------------*/
/*
 * Ask the hardware to shut down and wait for it to reach
 * CARD_STATE_SHUTDOWN, allowing up to 120 seconds per wait phase.
 *
 * Returns 0 on success, the error from rsxx_get_card_state()/
 * rsxx_issue_card_cmd(), or -ETIMEDOUT if the card stays busy.
 *
 * NOTE(review): both loops poll without sleeping; presumably the creg
 * state reads themselves block long enough — confirm, else add msleep().
 */
static int card_shutdown(struct rsxx_cardinfo *card)
{
	unsigned int state;
	signed long start;
	const int timeout = msecs_to_jiffies(120000);
	int st;

	/* We can't issue a shutdown if the card is in a transition state */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state == CARD_STATE_STARTING &&
		 (jiffies - start < timeout));

	if (state == CARD_STATE_STARTING)
		return -ETIMEDOUT;

	/* Only issue a shutdown if we need to */
	if ((state != CARD_STATE_SHUTTING_DOWN) &&
	    (state != CARD_STATE_SHUTDOWN)) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
		if (st)
			return st;
	}

	/* Wait for the card to report it has fully shut down. */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state != CARD_STATE_SHUTDOWN &&
		 (jiffies - start < timeout));

	if (state != CARD_STATE_SHUTDOWN)
		return -ETIMEDOUT;

	return 0;
}
/*----------------- Driver Initialization & Setup -------------------*/

/* Returns: 0 if the driver is compatible with the device
	    -1 if the driver is NOT compatible with the device */
  261. static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
  262. {
  263. unsigned char pci_rev;
  264. pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
  265. if (pci_rev > RS70_PCI_REV_SUPPORTED)
  266. return -1;
  267. return 0;
  268. }
  269. static int rsxx_pci_probe(struct pci_dev *dev,
  270. const struct pci_device_id *id)
  271. {
  272. struct rsxx_cardinfo *card;
  273. int st;
  274. dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
  275. card = kzalloc(sizeof(*card), GFP_KERNEL);
  276. if (!card)
  277. return -ENOMEM;
  278. card->dev = dev;
  279. pci_set_drvdata(dev, card);
  280. do {
  281. if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
  282. st = -ENOMEM;
  283. goto failed_ida_get;
  284. }
  285. spin_lock(&rsxx_ida_lock);
  286. st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
  287. spin_unlock(&rsxx_ida_lock);
  288. } while (st == -EAGAIN);
  289. if (st)
  290. goto failed_ida_get;
  291. st = pci_enable_device(dev);
  292. if (st)
  293. goto failed_enable;
  294. pci_set_master(dev);
  295. pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
  296. st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
  297. if (st) {
  298. dev_err(CARD_TO_DEV(card),
  299. "No usable DMA configuration,aborting\n");
  300. goto failed_dma_mask;
  301. }
  302. st = pci_request_regions(dev, DRIVER_NAME);
  303. if (st) {
  304. dev_err(CARD_TO_DEV(card),
  305. "Failed to request memory region\n");
  306. goto failed_request_regions;
  307. }
  308. if (pci_resource_len(dev, 0) == 0) {
  309. dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
  310. st = -ENOMEM;
  311. goto failed_iomap;
  312. }
  313. card->regmap = pci_iomap(dev, 0, 0);
  314. if (!card->regmap) {
  315. dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
  316. st = -ENOMEM;
  317. goto failed_iomap;
  318. }
  319. spin_lock_init(&card->irq_lock);
  320. card->halt = 0;
  321. spin_lock_irq(&card->irq_lock);
  322. rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
  323. spin_unlock_irq(&card->irq_lock);
  324. if (!force_legacy) {
  325. st = pci_enable_msi(dev);
  326. if (st)
  327. dev_warn(CARD_TO_DEV(card),
  328. "Failed to enable MSI\n");
  329. }
  330. st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
  331. DRIVER_NAME, card);
  332. if (st) {
  333. dev_err(CARD_TO_DEV(card),
  334. "Failed requesting IRQ%d\n", dev->irq);
  335. goto failed_irq;
  336. }
  337. /************* Setup Processor Command Interface *************/
  338. rsxx_creg_setup(card);
  339. spin_lock_irq(&card->irq_lock);
  340. rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
  341. spin_unlock_irq(&card->irq_lock);
  342. st = rsxx_compatibility_check(card);
  343. if (st) {
  344. dev_warn(CARD_TO_DEV(card),
  345. "Incompatible driver detected. Please update the driver.\n");
  346. st = -EINVAL;
  347. goto failed_compatiblity_check;
  348. }
  349. /************* Load Card Config *************/
  350. st = rsxx_load_config(card);
  351. if (st)
  352. dev_err(CARD_TO_DEV(card),
  353. "Failed loading card config\n");
  354. /************* Setup DMA Engine *************/
  355. st = rsxx_get_num_targets(card, &card->n_targets);
  356. if (st)
  357. dev_info(CARD_TO_DEV(card),
  358. "Failed reading the number of DMA targets\n");
  359. card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
  360. if (!card->ctrl) {
  361. st = -ENOMEM;
  362. goto failed_dma_setup;
  363. }
  364. st = rsxx_dma_setup(card);
  365. if (st) {
  366. dev_info(CARD_TO_DEV(card),
  367. "Failed to setup DMA engine\n");
  368. goto failed_dma_setup;
  369. }
  370. /************* Setup Card Event Handler *************/
  371. INIT_WORK(&card->event_work, card_event_handler);
  372. st = rsxx_setup_dev(card);
  373. if (st)
  374. goto failed_create_dev;
  375. rsxx_get_card_state(card, &card->state);
  376. dev_info(CARD_TO_DEV(card),
  377. "card state: %s\n",
  378. rsxx_card_state_to_str(card->state));
  379. /*
  380. * Now that the DMA Engine and devices have been setup,
  381. * we can enable the event interrupt(it kicks off actions in
  382. * those layers so we couldn't enable it right away.)
  383. */
  384. spin_lock_irq(&card->irq_lock);
  385. rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
  386. spin_unlock_irq(&card->irq_lock);
  387. if (card->state == CARD_STATE_SHUTDOWN) {
  388. st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
  389. if (st)
  390. dev_crit(CARD_TO_DEV(card),
  391. "Failed issuing card startup\n");
  392. } else if (card->state == CARD_STATE_GOOD ||
  393. card->state == CARD_STATE_RD_ONLY_FAULT) {
  394. st = rsxx_get_card_size8(card, &card->size8);
  395. if (st)
  396. card->size8 = 0;
  397. }
  398. rsxx_attach_dev(card);
  399. return 0;
  400. failed_create_dev:
  401. rsxx_dma_destroy(card);
  402. failed_dma_setup:
  403. failed_compatiblity_check:
  404. spin_lock_irq(&card->irq_lock);
  405. rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
  406. spin_unlock_irq(&card->irq_lock);
  407. free_irq(dev->irq, card);
  408. if (!force_legacy)
  409. pci_disable_msi(dev);
  410. failed_irq:
  411. pci_iounmap(dev, card->regmap);
  412. failed_iomap:
  413. pci_release_regions(dev);
  414. failed_request_regions:
  415. failed_dma_mask:
  416. pci_disable_device(dev);
  417. failed_enable:
  418. spin_lock(&rsxx_ida_lock);
  419. ida_remove(&rsxx_disk_ida, card->disk_id);
  420. spin_unlock(&rsxx_ida_lock);
  421. failed_ida_get:
  422. kfree(card);
  423. return st;
  424. }
/*
 * Unbind one card: detach the block device, shut the hardware down,
 * quiesce interrupts and work items, and release every resource in
 * reverse order of rsxx_pci_probe().
 */
static void rsxx_pci_remove(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int st;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card),
		"Removing PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	/* Mask per-target DMA interrupts before shutting the card down. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	st = card_shutdown(card);
	if (st)
		dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");

	/* Sync outstanding event handlers. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* Prevent work_structs from re-queuing themselves. */
	card->halt = 1;

	cancel_work_sync(&card->event_work);

	/*
	 * NOTE(review): card->ctrl (allocated in probe) is presumably
	 * freed by rsxx_dma_destroy() — verify against dma.c.
	 */
	rsxx_destroy_dev(card);
	rsxx_dma_destroy(card);

	/* Final full interrupt mask before the IRQ is released. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	free_irq(dev->irq, card);

	if (!force_legacy)
		pci_disable_msi(dev);

	rsxx_creg_destroy(card);

	pci_iounmap(dev, card->regmap);
	pci_disable_device(dev);
	pci_release_regions(dev);

	kfree(card);
}
/* PM suspend hook: suspend is intentionally unsupported, so always fail. */
static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	/* We don't support suspend at this time. */
	return -ENOSYS;
}
  470. static void rsxx_pci_shutdown(struct pci_dev *dev)
  471. {
  472. struct rsxx_cardinfo *card = pci_get_drvdata(dev);
  473. unsigned long flags;
  474. int i;
  475. if (!card)
  476. return;
  477. dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
  478. rsxx_detach_dev(card);
  479. for (i = 0; i < card->n_targets; i++) {
  480. spin_lock_irqsave(&card->irq_lock, flags);
  481. rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
  482. spin_unlock_irqrestore(&card->irq_lock, flags);
  483. }
  484. card_shutdown(card);
  485. }
/* PCI IDs of the RamSan flash devices this driver binds to. */
static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
	{0,},
};

MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
/* PCI driver glue binding the probe/remove/suspend/shutdown callbacks. */
static struct pci_driver rsxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= rsxx_pci_ids,
	.probe		= rsxx_pci_probe,
	.remove		= rsxx_pci_remove,
	.suspend	= rsxx_pci_suspend,
	.shutdown	= rsxx_pci_shutdown,
};
  502. static int __init rsxx_core_init(void)
  503. {
  504. int st;
  505. st = rsxx_dev_init();
  506. if (st)
  507. return st;
  508. st = rsxx_dma_init();
  509. if (st)
  510. goto dma_init_failed;
  511. st = rsxx_creg_init();
  512. if (st)
  513. goto creg_init_failed;
  514. return pci_register_driver(&rsxx_pci_driver);
  515. creg_init_failed:
  516. rsxx_dma_cleanup();
  517. dma_init_failed:
  518. rsxx_dev_cleanup();
  519. return st;
  520. }
/*
 * Module unload: unregister the PCI driver (which removes all bound
 * cards), then tear down the subsystems in reverse init order.
 */
static void __exit rsxx_core_cleanup(void)
{
	pci_unregister_driver(&rsxx_pci_driver);
	rsxx_creg_cleanup();
	rsxx_dma_cleanup();
	rsxx_dev_cleanup();
}
/* Module entry and exit points. */
module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);