scm_blk_cluster.c

/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
                 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
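
/*
 * Each request owns a private buffer of 2 * write_cluster_size pages so
 * that a write touching at most two adjacent clusters can be handled as
 * a read-modify-write of whole clusters.
 */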
void __scm_free_rq_cluster(struct scm_request *scmrq)
{
        int i;

        if (!scmrq->cluster.buf)
                return;

        for (i = 0; i < 2 * write_cluster_size; i++)
                free_page((unsigned long) scmrq->cluster.buf[i]);

        kfree(scmrq->cluster.buf);
}

int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
        int i;

        scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
                                     GFP_KERNEL);
        if (!scmrq->cluster.buf)
                return -ENOMEM;

        for (i = 0; i < 2 * write_cluster_size; i++) {
                scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
                if (!scmrq->cluster.buf[i])
                        return -ENOMEM;
        }
        INIT_LIST_HEAD(&scmrq->cluster.list);
        return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
        scmrq->cluster.state = CLUSTER_NONE;
}
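
/* Check whether the cluster ranges touched by two requests overlap. */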
static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
{
        unsigned long firstA, lastA, firstB, lastB;

        firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
        lastA = (((u64) blk_rq_pos(A->request) << 9) +
                 blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;

        firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
        lastB = (((u64) blk_rq_pos(B->request) << 9) +
                 blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;

        return (firstB <= lastA && firstA <= lastB);
}
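
/*
 * Add the request to the list of in-flight clustered requests. Returns
 * false without adding it if it intersects a queued request and at
 * least one of the two is a write.
 */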
bool scm_reserve_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_request *iter;

        if (write_cluster_size == 0)
                return true;

        spin_lock(&bdev->lock);
        list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
                if (clusters_intersect(scmrq, iter) &&
                    (rq_data_dir(scmrq->request) == WRITE ||
                     rq_data_dir(iter->request) == WRITE)) {
                        spin_unlock(&bdev->lock);
                        return false;
                }
        }
        list_add(&scmrq->cluster.list, &bdev->cluster_list);
        spin_unlock(&bdev->lock);

        return true;
}
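
/* Remove the request from the list of in-flight clustered requests. */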
void scm_release_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        if (write_cluster_size == 0)
                return;

        spin_lock_irqsave(&bdev->lock, flags);
        list_del(&scmrq->cluster.list);
        spin_unlock_irqrestore(&bdev->lock, flags);
}
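
/*
 * Per-device setup: initialize the cluster list and advertise
 * CLUSTER_SIZE as the optimal I/O size.
 */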
void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
        INIT_LIST_HEAD(&bdev->cluster_list);
        blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}
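
/*
 * Build the msb and aidaw list for the current step of the
 * read-modify-write sequence: CLUSTER_READ reads the affected cluster(s)
 * into the request's private buffer, CLUSTER_WRITE writes them back with
 * the request's payload patched in.
 */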
static void scm_prepare_cluster_request(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct request *req = scmrq->request;
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec *bv;
        int i = 0;
        u64 addr;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                scmrq->cluster.state = CLUSTER_READ;
                /* fall through */
        case CLUSTER_READ:
                scmrq->aob->request.msb_count = 1;
                msb->bs = MSB_BS_4K;
                msb->oc = MSB_OC_READ;
                msb->flags = MSB_FLAG_IDA;
                msb->data_addr = (u64) aidaw;
                msb->blk_count = write_cluster_size;

                addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
                msb->scm_addr = round_down(addr, CLUSTER_SIZE);

                /* A request crossing a cluster boundary covers two clusters. */
                if (msb->scm_addr !=
                    round_down(addr + (u64) blk_rq_bytes(req) - 1,
                               CLUSTER_SIZE))
                        msb->blk_count = 2 * write_cluster_size;

                for (i = 0; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }
                break;
        case CLUSTER_WRITE:
                msb->oc = MSB_OC_WRITE;

                /* Pages read into the cluster buffer ahead of the request, */
                for (addr = msb->scm_addr;
                     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
                     addr += PAGE_SIZE) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                        i++;
                }
                /* the request payload itself, */
                rq_for_each_segment(bv, req, iter) {
                        aidaw->data_addr = (u64) page_address(bv->bv_page);
                        aidaw++;
                        i++;
                }
                /* and the buffered pages behind the request. */
                for (; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }
                break;
        }
}
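
/* Writes smaller than one cluster have to take the read-modify-write path. */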
bool scm_need_cluster_request(struct scm_request *scmrq)
{
        if (rq_data_dir(scmrq->request) == READ)
                return false;

        return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
        scm_prepare_cluster_request(scmrq);
        if (scm_start_aob(scmrq->aob))
                scm_request_requeue(scmrq);
}
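
/* True if the request is being handled as a clustered request. */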
bool scm_test_cluster_request(struct scm_request *scmrq)
{
        return scmrq->cluster.state != CLUSTER_NONE;
}
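
/*
 * Advance the cluster state machine on interrupt: after a successful
 * CLUSTER_READ the write back is started; after CLUSTER_WRITE, or on a
 * read error, the request is finished.
 */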
void scm_cluster_request_irq(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                BUG();
                break;
        case CLUSTER_READ:
                if (scmrq->error) {
                        scm_request_finish(scmrq);
                        break;
                }
                scmrq->cluster.state = CLUSTER_WRITE;
                spin_lock_irqsave(&bdev->rq_lock, flags);
                scm_initiate_cluster_request(scmrq);
                spin_unlock_irqrestore(&bdev->rq_lock, flags);
                break;
        case CLUSTER_WRITE:
                scm_request_finish(scmrq);
                break;
        }
}
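
/* 0 disables write clustering; otherwise 32, 64 or 128 pages are allowed. */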
bool scm_cluster_size_valid(void)
{
        return write_cluster_size == 0 || write_cluster_size == 32 ||
               write_cluster_size == 64 || write_cluster_size == 128;
}