/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:    queue
 * @pfn:  prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
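
/*
 * Usage sketch (illustrative only, not part of this file): a SCSI-like
 * driver could register a prepare_request hook that builds the command
 * block before the request reaches its request_fn.  my_build_cdb() and
 * my_init_queue() are assumed names.
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (my_build_cdb(rq))		// fill rq->cmd from the request
 *			return BLKPREP_KILL;	// unrecoverable: fail the request
 *		return BLKPREP_OK;		// ready to be handed to request_fn
 *	}
 *
 *	static void my_init_queue(struct request_queue *q)
 *	{
 *		blk_queue_prep_rq(q, my_prep_rq);
 *	}
 */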

/**
 * blk_queue_set_discard - set a discard_sectors function for queue
 * @q:    queue
 * @dfn:  prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 *
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
        q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:     queue
 * @mbfn:  merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
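
/*
 * Usage sketch (illustrative only): a stacking driver with a dynamic size
 * limit could refuse to grow a bio past that limit.  my_max_io_bytes() is
 * an assumed helper; the bvec_merge_data prototype matches the kernel this
 * file comes from and may differ in other trees.
 *
 *	static int my_merge_bvec(struct request_queue *q,
 *				 struct bvec_merge_data *bvm,
 *				 struct bio_vec *biovec)
 *	{
 *		unsigned int max = my_max_io_bytes(bvm->bi_sector);
 *
 *		// a single page must always fit into an empty bio
 *		if (bvm->bi_size && bvm->bi_size + biovec->bv_len > max)
 *			return 0;		// refuse: bio would grow too large
 *		return biovec->bv_len;		// accept the whole bio_vec
 *	}
 *
 *	// at queue setup time:
 *	//	blk_queue_merge_bvec(q, my_merge_bvec);
 */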

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:    the request queue for the device to be affected
 * @mfn:  the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

        q->make_request_fn = mfn;
        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        q->unplug_thresh = 4;			/* hmm */
        q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;

        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
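
/*
 * Usage sketch (illustrative only): a bio-based virtual driver, in the
 * style of md or dm, bypasses the request queue by installing its own
 * make_request handler when the queue is allocated.  my_remap() and
 * my_make_request() are assumed names.
 *
 *	static int my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		my_remap(bio);			// point the bio at the backing device
 *		generic_make_request(bio);	// resubmit the remapped bio
 *		return 0;
 *	}
 *
 *	// after blk_alloc_queue() at initialisation time:
 *	//	blk_queue_make_request(q, my_make_request);
 */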

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:         the request queue for the device
 * @dma_mask:  the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
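
/*
 * Usage sketch (illustrative only): the limit is simply the highest address
 * the device can DMA to, so a driver typically passes its DMA mask or one
 * of the BLK_BOUNCE_* constants.
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	// bounce pages above 4GB
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);	// device reaches everything
 */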

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:            the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_sectors);
        }

        if (BLK_DEF_MAX_SECTORS > max_sectors)
                q->max_hw_sectors = q->max_sectors = max_sectors;
        else {
                q->max_sectors = BLK_DEF_MAX_SECTORS;
                q->max_hw_sectors = max_sectors;
        }
}
EXPORT_SYMBOL(blk_queue_max_sectors);
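
/*
 * Usage sketch (illustrative only): the unit is 512-byte sectors regardless
 * of the hardware sector size, so a controller limited to 128KB per command
 * would set 256 sectors.
 *
 *	blk_queue_max_sectors(q, 256);	// 256 * 512 bytes = 128KB per request
 */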

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:             the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
                                 unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:             the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
                               unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:         the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:     the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on without resorting to internal
 *   read-modify-write operations. Usually the default of 512 covers
 *   most hardware.
 **/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
        q->hardsect_size = size;
}
EXPORT_SYMBOL(blk_queue_hardsect_size);
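
/*
 * Usage sketch (illustrative only): a device with 4KB physical sectors
 * would report that as its hardware sector size.
 *
 *	blk_queue_hardsect_size(q, 4096);
 */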

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        /* zero is "infinity" */
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_phys_segments = min_not_zero(t->max_phys_segments,
                                            b->max_phys_segments);
        t->max_hw_segments = min_not_zero(t->max_hw_segments,
                                          b->max_hw_segments);
        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);

        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
                unsigned long flags;

                spin_lock_irqsave(t->queue_lock, flags);
                queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
                spin_unlock_irqrestore(t->queue_lock, flags);
        }
}
EXPORT_SYMBOL(blk_queue_stack_limits);
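
/*
 * Usage sketch (illustrative only): an md/dm-style stacking driver starts
 * from its own defaults and then folds in the limits of every member
 * device.  my_members[] and nr_members are assumed names.
 *
 *	for (i = 0; i < nr_members; i++)
 *		blk_queue_stack_limits(t, bdev_get_queue(my_members[i]));
 */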

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
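
/*
 * Usage sketch (illustrative only): libata pads ATAPI transfers to a
 * 4-byte multiple, so the pad mask is the pad size minus one.  The update
 * variant is used so that other layers can only raise the mask, never
 * lower it.
 *
 *	blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
 */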

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
                return -EINVAL;
        /* make room for appending the drain */
        --q->max_hw_segments;
        --q->max_phys_segments;
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
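
/*
 * Usage sketch (illustrative only), in the style of libata's ATAPI
 * handling: allocate one drain buffer per device and register it together
 * with a predicate that says when draining is needed.  my_drain_needed()
 * is an assumed name; ATAPI_MAX_DRAIN is libata's buffer size.
 *
 *	buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	err = blk_queue_dma_drain(q, my_drain_needed, buf, ATAPI_MAX_DRAIN);
 */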

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:     the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
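
/*
 * Usage sketch (illustrative only): the mask is the required alignment
 * minus one, so a device needing 4-byte aligned buffers and lengths would
 * use a mask of 3 (the queue default set in blk_queue_make_request() above
 * is 511, i.e. 512-byte alignment).
 *
 *	blk_queue_dma_alignment(q, 0x3);
 */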

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);