/*
 *  linux/drivers/mmc/mmc_queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "mmc_queue.h"

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create a mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	int ret = BLKPREP_KILL;

	if (blk_special_request(req)) {
		/*
		 * Special commands already have the command
		 * blocks already setup in req->special.
		 */
		BUG_ON(!req->special);

		ret = BLKPREP_OK;
	} else if (blk_fs_request(req) || blk_pc_request(req)) {
		/*
		 * Block I/O requests need translating according
		 * to the protocol.
		 */
		ret = mq->prep_fn(mq, req);
	} else {
		/*
		 * Everything else is invalid.
		 */
		blk_dump_rq_flags(req, "MMC bad request");
	}

	if (ret == BLKPREP_OK)
		req->cmd_flags |= REQ_DONTPREP;

	return ret;
}
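
/*
 * Illustrative sketch of the contract above: mq->prep_fn is supplied by
 * the media driver.  The name example_prep_fn and its body are
 * hypothetical, which is why the fragment is kept under #if 0.
 */
#if 0
static int example_prep_fn(struct mmc_queue *mq, struct request *req)
{
	/* Only ordinary block I/O is handled in this sketch. */
	if (!blk_fs_request(req))
		return BLKPREP_KILL;

	/* A real driver would build its command blocks here. */
	return BLKPREP_OK;
}
#endif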

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/*
			 * No work: release the semaphore so the queue can
			 * be suspended, and sleep until mmc_request() or
			 * kthread_stop() wakes us.
			 */
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
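
/*
 * Illustrative sketch of the other media-driver hook: mq->issue_fn runs
 * in the mmcqd thread above with mq->req set, performs the transfer and
 * completes the request.  example_issue_fn is hypothetical and kept
 * under #if 0.
 */
#if 0
static int example_issue_fn(struct mmc_queue *mq, struct request *req)
{
	/*
	 * Translate req into an MMC data transfer against mq->card, run
	 * it, then finish the request with the end_that_request_*
	 * helpers before returning.
	 */
	return 1;
}
#endif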

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup_queue;
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_sg;
	}

	return 0;

 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
EXPORT_SYMBOL(mmc_init_queue);
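
/*
 * Illustrative sketch of how a media driver might wire up a queue after
 * mmc_init_queue() succeeds.  The md pointer and the example_* handlers
 * are hypothetical, so the fragment is kept under #if 0.
 */
#if 0
	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		return ret;

	md->queue.prep_fn = example_prep_fn;
	md->queue.issue_fn = example_issue_fn;
#endif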

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
EXPORT_SYMBOL(mmc_queue_suspend);

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(mmc_queue_resume);
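
/*
 * Illustrative sketch of suspend/resume usage: a card driver would
 * typically call these helpers from its power-management hooks so that
 * no request is in flight while the host sleeps.  The example_* names
 * and the per-card data lookup are hypothetical, hence the #if 0.
 */
#if 0
static int example_suspend(struct mmc_card *card, pm_message_t state)
{
	struct example_blk_data *md = example_get_data(card);

	if (md)
		mmc_queue_suspend(&md->queue);
	return 0;
}

static int example_resume(struct mmc_card *card)
{
	struct example_blk_data *md = example_get_data(card);

	if (md)
		mmc_queue_resume(&md->queue);
	return 0;
}
#endif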