blk-settings.c

/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/jiffies.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * cdb from the request data, for instance.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is OK to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *    Returns a queue_limit struct to its default state.  Can be used by
 *    stacking drivers like DM that stage table swaps and reuse an
 *    existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
	lim->max_hw_segments = MAX_HW_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for a &struct bio to be passed to a device
 *    driver is for it to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
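/*
 * Usage sketch (illustrative only, not part of this file): a bio-based
 * driver such as a simple ramdisk bypasses request-queue processing by
 * registering its own make_request function.  The names
 * "example_make_request", "example_dev" and "example_setup" are
 * hypothetical.
 *
 *	static int example_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		// handle the bio directly, then signal completion
 *		bio_endio(bio, 0);
 *		return 0;
 *	}
 *
 *	static int example_setup(struct example_dev *dev)
 *	{
 *		dev->queue = blk_alloc_queue(GFP_KERNEL);
 *		if (!dev->queue)
 *			return -ENOMEM;
 *		blk_queue_make_request(dev->queue, example_make_request);
 *		return 0;
 *	}
 */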
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
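/*
 * Usage sketch (illustrative only, hypothetical "example_queue"): a
 * driver for hardware that can only DMA to the low 4GB would restrict
 * bounce buffering accordingly:
 *
 *	blk_queue_bounce_limit(example_queue, DMA_BIT_MASK(32));
 */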
/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
	else {
		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
		q->limits.max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
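/*
 * Usage sketch (illustrative only, hypothetical "example_queue"): a
 * controller limited to 64KB per transfer caps requests at 128 sectors
 * of 512 bytes:
 *
 *	blk_queue_max_sectors(example_queue, 128);
 *
 * Note from the code above: a value larger than BLK_DEF_MAX_SECTORS
 * raises only the hardware limit (max_hw_sectors), while the soft
 * limit (max_sectors) stays capped at the default.
 */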
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
	else
		q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
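/*
 * Usage sketch (illustrative only, hypothetical "example_queue"): a
 * drive with 4KB physical sectors that emulates 512-byte logical
 * sectors, with its first logical sector naturally aligned, would
 * report its topology like this:
 *
 *	blk_queue_logical_block_size(example_queue, 512);
 *	blk_queue_physical_block_size(example_queue, 4096);
 *	blk_queue_alignment_offset(example_queue, 0);
 */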
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
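/*
 * Usage sketch (illustrative only, hypothetical "example_queue"): a
 * RAID5 array with a 64KB chunk size striped across 4 data disks would
 * advertise the chunk size as the minimum I/O and the full stripe
 * width as the optimal I/O:
 *
 *	blk_queue_io_min(example_queue, 64 * 1024);
 *	blk_queue_io_opt(example_queue, 4 * 64 * 1024);
 */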
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r)	\
	((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

static unsigned int lcm(unsigned int a, unsigned int b)
{
	if (a && b)
		return (a * b) / gcd(a, b);
	else if (b)
		return b;

	return a;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t offset)
{
	sector_t alignment;
	unsigned int top, bottom;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_phys_segments = min_not_zero(t->max_phys_segments,
					    b->max_phys_segments);

	t->max_hw_segments = min_not_zero(t->max_hw_segments,
					  b->max_hw_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, offset);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1))
			t->misaligned = 1;
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->no_cluster |= b->no_cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1))
		t->misaligned = 1;

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		unsigned int granularity = b->discard_granularity;
		offset &= granularity - 1;

		alignment = (granularity + b->discard_alignment - offset)
			& (granularity - 1);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return t->misaligned ? -1 : 0;
}
EXPORT_SYMBOL(blk_stack_limits);
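/*
 * Usage sketch (illustrative only): a stacking driver starts from
 * default limits and folds in each component device.  The "table"
 * structure and its members are hypothetical.
 *
 *	struct queue_limits limits;
 *	int i;
 *
 *	blk_set_default_limits(&limits);
 *	for (i = 0; i < table->num_devices; i++)
 *		if (blk_stack_limits(&limits, &table->dev[i]->limits,
 *				     table->offset[i]) < 0)
 *			pr_warning("device %d misaligned\n", i);
 */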
/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for two queues.  If adding the bottom device
 *    causes misalignment, a warning is printed.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	offset += get_start_sect(bdev) << 9;

	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		/* re-check under the queue lock before clearing */
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
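/*
 * Usage sketch (illustrative only): an ATAPI driver reserving a drain
 * buffer so a device that transfers more data than requested cannot
 * overrun the scatterlist.  "example_drain_needed", "example_queue",
 * "buf" and EXAMPLE_DRAIN_SIZE are hypothetical names.
 *
 *	#define EXAMPLE_DRAIN_SIZE	8192
 *
 *	static int example_drain_needed(struct request *rq)
 *	{
 *		return blk_pc_request(rq);	// drain packet commands
 *	}
 *
 *	buf = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(example_queue, example_drain_needed,
 *				    buf, EXAMPLE_DRAIN_SIZE);
 */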
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
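/*
 * Usage sketch (illustrative only, hypothetical "example_queue"): a
 * transport and a device each impose an alignment, and the queue keeps
 * the strictest mask seen so far:
 *
 *	blk_queue_update_dma_alignment(example_queue, 3);    - transport: 4-byte
 *	blk_queue_update_dma_alignment(example_queue, 511);  - device: 512-byte
 *
 * After both calls, q->dma_alignment is 511.
 */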
static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);