/*
 *  linux/drivers/mmc/mmc_queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "mmc_queue.h"

#define MMC_QUEUE_SUSPENDED	(1 << 0)
  18. /*
  19. * Prepare a MMC request. Essentially, this means passing the
  20. * preparation off to the media driver. The media driver will
  21. * create a mmc_io_request in req->special.
  22. */
  23. static int mmc_prep_request(struct request_queue *q, struct request *req)
  24. {
  25. struct mmc_queue *mq = q->queuedata;
  26. int ret = BLKPREP_KILL;
  27. if (blk_special_request(req)) {
  28. /*
  29. * Special commands already have the command
  30. * blocks already setup in req->special.
  31. */
  32. BUG_ON(!req->special);
  33. ret = BLKPREP_OK;
  34. } else if (blk_fs_request(req) || blk_pc_request(req)) {
  35. /*
  36. * Block I/O requests need translating according
  37. * to the protocol.
  38. */
  39. ret = mq->prep_fn(mq, req);
  40. } else {
  41. /*
  42. * Everything else is invalid.
  43. */
  44. blk_dump_rq_flags(req, "MMC bad request");
  45. }
  46. if (ret == BLKPREP_OK)
  47. req->cmd_flags |= REQ_DONTPREP;
  48. return ret;
  49. }
  50. static int mmc_queue_thread(void *d)
  51. {
  52. struct mmc_queue *mq = d;
  53. struct request_queue *q = mq->queue;
  54. /*
  55. * Set iothread to ensure that we aren't put to sleep by
  56. * the process freezing. We handle suspension ourselves.
  57. */
  58. current->flags |= PF_MEMALLOC|PF_NOFREEZE;
  59. down(&mq->thread_sem);
  60. do {
  61. struct request *req = NULL;
  62. spin_lock_irq(q->queue_lock);
  63. set_current_state(TASK_INTERRUPTIBLE);
  64. if (!blk_queue_plugged(q))
  65. req = elv_next_request(q);
  66. mq->req = req;
  67. spin_unlock_irq(q->queue_lock);
  68. if (!req) {
  69. if (kthread_should_stop())
  70. break;
  71. up(&mq->thread_sem);
  72. schedule();
  73. down(&mq->thread_sem);
  74. continue;
  75. }
  76. set_current_state(TASK_RUNNING);
  77. mq->issue_fn(mq, req);
  78. } while (1);
  79. up(&mq->thread_sem);
  80. return 0;
  81. }
  82. /*
  83. * Generic MMC request handler. This is called for any queue on a
  84. * particular host. When the host is not busy, we look for a request
  85. * on any queue on this host, and attempt to issue it. This may
  86. * not be the queue we were asked to process.
  87. */
  88. static void mmc_request(request_queue_t *q)
  89. {
  90. struct mmc_queue *mq = q->queuedata;
  91. struct request *req;
  92. int ret;
  93. if (!mq) {
  94. printk(KERN_ERR "MMC: killing requests for dead queue\n");
  95. while ((req = elv_next_request(q)) != NULL) {
  96. do {
  97. ret = end_that_request_chunk(req, 0,
  98. req->current_nr_sectors << 9);
  99. } while (ret);
  100. }
  101. return;
  102. }
  103. if (!mq->req)
  104. wake_up_process(mq->thread);
  105. }
  106. /**
  107. * mmc_init_queue - initialise a queue structure.
  108. * @mq: mmc queue
  109. * @card: mmc card to attach this queue
  110. * @lock: queue lock
  111. *
  112. * Initialise a MMC card request queue.
  113. */
  114. int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
  115. {
  116. struct mmc_host *host = card->host;
  117. u64 limit = BLK_BOUNCE_HIGH;
  118. int ret;
  119. if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
  120. limit = *mmc_dev(host)->dma_mask;
  121. mq->card = card;
  122. mq->queue = blk_init_queue(mmc_request, lock);
  123. if (!mq->queue)
  124. return -ENOMEM;
  125. blk_queue_prep_rq(mq->queue, mmc_prep_request);
  126. blk_queue_bounce_limit(mq->queue, limit);
  127. blk_queue_max_sectors(mq->queue, host->max_sectors);
  128. blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
  129. blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
  130. blk_queue_max_segment_size(mq->queue, host->max_seg_size);
  131. mq->queue->queuedata = mq;
  132. mq->req = NULL;
  133. mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
  134. GFP_KERNEL);
  135. if (!mq->sg) {
  136. ret = -ENOMEM;
  137. goto cleanup_queue;
  138. }
  139. init_MUTEX(&mq->thread_sem);
  140. mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
  141. if (IS_ERR(mq->thread)) {
  142. ret = PTR_ERR(mq->thread);
  143. goto free_sg;
  144. }
  145. return 0;
  146. free_sg:
  147. kfree(mq->sg);
  148. mq->sg = NULL;
  149. cleanup_queue:
  150. blk_cleanup_queue(mq->queue);
  151. return ret;
  152. }
  153. EXPORT_SYMBOL(mmc_init_queue);
  154. void mmc_cleanup_queue(struct mmc_queue *mq)
  155. {
  156. request_queue_t *q = mq->queue;
  157. unsigned long flags;
  158. /* Mark that we should start throwing out stragglers */
  159. spin_lock_irqsave(q->queue_lock, flags);
  160. q->queuedata = NULL;
  161. spin_unlock_irqrestore(q->queue_lock, flags);
  162. /* Then terminate our worker thread */
  163. kthread_stop(mq->thread);
  164. kfree(mq->sg);
  165. mq->sg = NULL;
  166. blk_cleanup_queue(mq->queue);
  167. mq->card = NULL;
  168. }
  169. EXPORT_SYMBOL(mmc_cleanup_queue);
  170. /**
  171. * mmc_queue_suspend - suspend a MMC request queue
  172. * @mq: MMC queue to suspend
  173. *
  174. * Stop the block request queue, and wait for our thread to
  175. * complete any outstanding requests. This ensures that we
  176. * won't suspend while a request is being processed.
  177. */
  178. void mmc_queue_suspend(struct mmc_queue *mq)
  179. {
  180. request_queue_t *q = mq->queue;
  181. unsigned long flags;
  182. if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
  183. mq->flags |= MMC_QUEUE_SUSPENDED;
  184. spin_lock_irqsave(q->queue_lock, flags);
  185. blk_stop_queue(q);
  186. spin_unlock_irqrestore(q->queue_lock, flags);
  187. down(&mq->thread_sem);
  188. }
  189. }
  190. EXPORT_SYMBOL(mmc_queue_suspend);
  191. /**
  192. * mmc_queue_resume - resume a previously suspended MMC request queue
  193. * @mq: MMC queue to resume
  194. */
  195. void mmc_queue_resume(struct mmc_queue *mq)
  196. {
  197. request_queue_t *q = mq->queue;
  198. unsigned long flags;
  199. if (mq->flags & MMC_QUEUE_SUSPENDED) {
  200. mq->flags &= ~MMC_QUEUE_SUSPENDED;
  201. up(&mq->thread_sem);
  202. spin_lock_irqsave(q->queue_lock, flags);
  203. blk_start_queue(q);
  204. spin_unlock_irqrestore(q->queue_lock, flags);
  205. }
  206. }
  207. EXPORT_SYMBOL(mmc_queue_resume);