scm_blk.c

/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
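
/*
 * Request pool: nr_requests scm_requests are preallocated at module load
 * and kept on the inactive_requests list.  The AOB and the aidaw list of
 * each request occupy a zeroed page of their own, which keeps them
 * naturally 4k aligned; both pages are allocated with GFP_DMA
 * (31-bit addressable memory on s390).
 */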
static void __scm_free_rq(struct scm_request *scmrq)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);

        free_page((unsigned long) scmrq->aob);
        free_page((unsigned long) scmrq->aidaw);
        kfree(aobrq);
}

static void scm_free_rqs(void)
{
        struct list_head *iter, *safe;
        struct scm_request *scmrq;

        spin_lock_irq(&list_lock);
        list_for_each_safe(iter, safe, &inactive_requests) {
                scmrq = list_entry(iter, struct scm_request, list);
                list_del(&scmrq->list);
                __scm_free_rq(scmrq);
        }
        spin_unlock_irq(&list_lock);
}
static int __scm_alloc_rq(void)
{
        struct aob_rq_header *aobrq;
        struct scm_request *scmrq;

        aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
        if (!aobrq)
                return -ENOMEM;

        scmrq = (void *) aobrq->data;
        scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
        scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
        if (!scmrq->aob || !scmrq->aidaw) {
                __scm_free_rq(scmrq);
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&scmrq->list);
        spin_lock_irq(&list_lock);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irq(&list_lock);

        return 0;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
        int ret = 0;

        while (nrqs-- && !ret)
                ret = __scm_alloc_rq();

        return ret;
}
static struct scm_request *scm_request_fetch(void)
{
        struct scm_request *scmrq = NULL;

        spin_lock(&list_lock);
        if (list_empty(&inactive_requests))
                goto out;
        scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
        list_del(&scmrq->list);
out:
        spin_unlock(&list_lock);
        return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irqrestore(&list_lock, flags);
}

static int scm_open(struct block_device *blkdev, fmode_t mode)
{
        return scm_get_ref();
}

static int scm_release(struct gendisk *gendisk, fmode_t mode)
{
        scm_put_ref();
        return 0;
}

static const struct block_device_operations scm_blk_devops = {
        .owner = THIS_MODULE,
        .open = scm_open,
        .release = scm_release,
};
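
/*
 * Translate a block layer request into a single measurement block (msb):
 * the start address within the SCM increment is derived from the request's
 * 512-byte sector position, the transfer length is counted in 4k blocks,
 * and the data pages are referenced indirectly through the aidaw list
 * (MSB_FLAG_IDA).  Segments are expected to be page aligned, hence the
 * WARN_ON on a non-zero bv_offset.
 */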
static void scm_request_prepare(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec *bv;

        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count = 1;
        msb->scm_addr = scmdev->address +
                ((u64) blk_rq_pos(scmrq->request) << 9);
        msb->oc = (rq_data_dir(scmrq->request) == READ) ?
                MSB_OC_READ : MSB_OC_WRITE;
        msb->flags |= MSB_FLAG_IDA;
        msb->data_addr = (u64) aidaw;

        rq_for_each_segment(bv, scmrq->request, iter) {
                WARN_ON(bv->bv_offset);
                msb->blk_count += bv->bv_len >> 12;
                aidaw->data_addr = (u64) page_address(bv->bv_page);
                aidaw++;
        }
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
                                    struct scm_request *scmrq,
                                    struct request *req)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);
        struct aob *aob = scmrq->aob;

        memset(aob, 0, sizeof(*aob));
        memset(scmrq->aidaw, 0, PAGE_SIZE);
        aobrq->scmdev = bdev->scmdev;
        aob->request.cmd_code = ARQB_CMD_MOVE;
        aob->request.data = (u64) aobrq;
        scmrq->request = req;
        scmrq->bdev = bdev;
        scmrq->retries = 4;
        scmrq->error = 0;
}
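
/*
 * If requests are outstanding the queue is restarted from the interrupt
 * path once they complete.  With nothing in flight nobody would kick the
 * queue again, so schedule a delayed queue run instead.
 */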
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
        if (atomic_read(&bdev->queued_reqs)) {
                /* Queue restart is triggered by the next interrupt. */
                return;
        }
        blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

static void scm_request_requeue(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;

        blk_requeue_request(bdev->rq, scmrq->request);
        atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
        scm_ensure_queue_restart(bdev);
}

static void scm_request_finish(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;

        blk_end_request_all(scmrq->request, scmrq->error);
        atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
}

static void scm_blk_request(struct request_queue *rq)
{
        struct scm_device *scmdev = rq->queuedata;
        struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
        struct scm_request *scmrq;
        struct request *req;
        int ret;

        while ((req = blk_peek_request(rq))) {
                if (req->cmd_type != REQ_TYPE_FS) {
                        /*
                         * Dequeue and fail non-fs requests: a bare
                         * "continue" would peek at the same request
                         * forever.
                         */
                        blk_start_request(req);
                        blk_end_request_all(req, -EIO);
                        continue;
                }
                scmrq = scm_request_fetch();
                if (!scmrq) {
                        SCM_LOG(5, "no request");
                        scm_ensure_queue_restart(bdev);
                        return;
                }
                scm_request_init(bdev, scmrq, req);
                scm_request_prepare(scmrq);
                atomic_inc(&bdev->queued_reqs);
                blk_start_request(req);

                ret = scm_start_aob(scmrq->aob);
                if (ret) {
                        SCM_LOG(5, "no subchannel");
                        scm_request_requeue(scmrq);
                        return;
                }
        }
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
        struct aob *aob = scmrq->aob;

        if (scmrq->error == -ETIMEDOUT)
                SCM_LOG(1, "Request timeout");
        else {
                SCM_LOG(1, "Request error");
                SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
        }
        if (scmrq->retries)
                SCM_LOG(1, "Retry request");
        else
                pr_err("An I/O operation to SCM failed with rc=%d\n",
                       scmrq->error);
}
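
/*
 * Called from interrupt context when an AOB completes.  Only record the
 * result and move the request to the per-device finished list here;
 * completion handling proper is deferred to the tasklet.
 */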
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
        struct scm_request *scmrq = data;
        struct scm_blk_dev *bdev = scmrq->bdev;

        scmrq->error = error;
        if (error)
                __scmrq_log_error(scmrq);

        spin_lock(&bdev->lock);
        list_add_tail(&scmrq->list, &bdev->finished_requests);
        spin_unlock(&bdev->lock);
        tasklet_hi_schedule(&bdev->tasklet);
}
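
/*
 * Drain the finished list: failed requests are restarted up to
 * scmrq->retries times, or requeued to the block layer if the restart
 * itself fails; everything else is completed.  bdev->lock is dropped
 * while an entry is processed so that the interrupt handler can add
 * new entries concurrently.
 */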
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
        struct scm_request *scmrq;
        unsigned long flags;

        spin_lock_irqsave(&bdev->lock, flags);
        while (!list_empty(&bdev->finished_requests)) {
                scmrq = list_first_entry(&bdev->finished_requests,
                                         struct scm_request, list);
                list_del(&scmrq->list);
                spin_unlock_irqrestore(&bdev->lock, flags);

                if (scmrq->error && scmrq->retries-- > 0) {
                        if (scm_start_aob(scmrq->aob)) {
                                spin_lock_irqsave(&bdev->rq_lock, flags);
                                scm_request_requeue(scmrq);
                                spin_unlock_irqrestore(&bdev->rq_lock, flags);
                        }
                        /* Request restarted or requeued, handle next. */
                        spin_lock_irqsave(&bdev->lock, flags);
                        continue;
                }
                scm_request_finish(scmrq);
                spin_lock_irqsave(&bdev->lock, flags);
        }
        spin_unlock_irqrestore(&bdev->lock, flags);
        /* Look out for more requests. */
        blk_run_queue(bdev->rq);
}
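
/*
 * Set up the request queue and gendisk for one SCM device.  Disks are
 * named scma..scmz, then scmaa..scmzz, which limits the driver to
 * 26 + 26 * 26 = 702 devices.  The logical block size is 4k and the
 * maximum segment count is capped so that one aidaw list always fits
 * on the preallocated page.
 */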
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
        struct request_queue *rq;
        int len, ret = -ENOMEM;
        unsigned int devindex, nr_max_blk;

        devindex = atomic_inc_return(&nr_devices) - 1;
        /* scma..scmz + scmaa..scmzz */
        if (devindex > 701) {
                ret = -ENODEV;
                goto out;
        }

        bdev->scmdev = scmdev;
        spin_lock_init(&bdev->rq_lock);
        spin_lock_init(&bdev->lock);
        INIT_LIST_HEAD(&bdev->finished_requests);
        atomic_set(&bdev->queued_reqs, 0);
        tasklet_init(&bdev->tasklet,
                     (void (*)(unsigned long)) scm_blk_tasklet,
                     (unsigned long) bdev);

        rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
        if (!rq)
                goto out;

        bdev->rq = rq;
        nr_max_blk = min(scmdev->nr_max_block,
                         (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

        blk_queue_logical_block_size(rq, 1 << 12);
        blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
        blk_queue_max_segments(rq, nr_max_blk);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);

        bdev->gendisk = alloc_disk(SCM_NR_PARTS);
        if (!bdev->gendisk)
                goto out_queue;

        rq->queuedata = scmdev;
        bdev->gendisk->driverfs_dev = &scmdev->dev;
        bdev->gendisk->private_data = scmdev;
        bdev->gendisk->fops = &scm_blk_devops;
        bdev->gendisk->queue = rq;
        bdev->gendisk->major = scm_major;
        bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

        len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
        if (devindex > 25) {
                len += snprintf(bdev->gendisk->disk_name + len,
                                DISK_NAME_LEN - len, "%c",
                                'a' + (devindex / 26) - 1);
                devindex = devindex % 26;
        }
        snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
                 'a' + devindex);

        /* 512 byte sectors */
        set_capacity(bdev->gendisk, scmdev->size >> 9);
        add_disk(bdev->gendisk);
        return 0;

out_queue:
        blk_cleanup_queue(rq);
out:
        atomic_dec(&nr_devices);
        return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
        tasklet_kill(&bdev->tasklet);
        del_gendisk(bdev->gendisk);
        blk_cleanup_queue(bdev->gendisk->queue);
        put_disk(bdev->gendisk);
}
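
/*
 * Module init: grab a dynamic major, preallocate the request pool, set
 * up the s390 debug feature, and finally register the scm driver.  Note
 * that register_blkdev() returns the major number on success, so ret
 * must not leak through the later error paths.
 */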
static int __init scm_blk_init(void)
{
        int ret;

        ret = register_blkdev(0, "scm");
        if (ret < 0)
                goto out;

        scm_major = ret;
        ret = scm_alloc_rqs(nr_requests);
        if (ret)
                goto out_unreg;

        scm_debug = debug_register("scm_log", 16, 1, 16);
        if (!scm_debug) {
                ret = -ENOMEM;
                goto out_free;
        }

        debug_register_view(scm_debug, &debug_hex_ascii_view);
        debug_set_level(scm_debug, 2);

        ret = scm_drv_init();
        if (ret)
                goto out_dbf;

        return ret;

out_dbf:
        debug_unregister(scm_debug);
out_free:
        scm_free_rqs();
out_unreg:
        unregister_blkdev(scm_major, "scm");
out:
        return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
        scm_drv_cleanup();
        debug_unregister(scm_debug);
        scm_free_rqs();
        unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);