blk-settings.c

/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
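
/*
 * Illustrative sketch only (not part of the original file): a request-based
 * driver would typically register its prepare_request hook while setting up
 * its queue.  The callback name mydrv_prep_fn and the command-building step
 * are hypothetical; the callback returns one of the BLKPREP_* codes.
 *
 *	static int mydrv_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		... build the command block (e.g. a SCSI cdb) for rq ...
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, mydrv_prep_fn);
 */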

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
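
/*
 * Illustrative sketch only (not part of the original file): a stacking
 * driver whose components change every chunk_sectors sectors might cap a
 * bio so it never crosses such a boundary.  The names mydrv_mergeable_bvec
 * and chunk_sectors are hypothetical; the callback returns how many bytes
 * of the proposed bio_vec it will accept, and must always accept one page
 * when the bio is still empty.
 *
 *	static int mydrv_mergeable_bvec(struct request_queue *q,
 *					struct bvec_merge_data *bvm,
 *					struct bio_vec *biovec)
 *	{
 *		unsigned int bio_sectors = bvm->bi_size >> 9;
 *		int max = (chunk_sectors -
 *			   ((bvm->bi_sector & (chunk_sectors - 1)) +
 *			    bio_sectors)) << 9;
 *
 *		if (max < 0)
 *			max = 0;
 *		if (max <= biovec->bv_len && bio_sectors == 0)
 *			return biovec->bv_len;
 *		return max;
 *	}
 *
 *	blk_queue_merge_bvec(q, mydrv_mergeable_bvec);
 */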

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->discard_zeroes_data = 1;
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
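
/*
 * Illustrative sketch only (not part of the original file): a bio-based
 * virtual driver (md/dm style) typically allocates a queue and installs
 * its make_request handler instead of using a request_fn.  The function
 * name mydrv_make_request is hypothetical.
 *
 *	static void mydrv_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		... remap or split the bio and pass it on, or complete it ...
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, mydrv_make_request);
 */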

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
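
/*
 * Illustrative sketch only (not part of the original file): a driver for
 * hardware that can only DMA to the low 32 bits of the address space could
 * ask the block layer to bounce anything above that:
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *
 * whereas a device with no addressing restriction would typically pass
 * BLK_BOUNCE_ANY and avoid bouncing altogether.
 */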

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
                                    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
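
/*
 * Illustrative sketch only (not part of the original file): a controller
 * that can transfer at most 512 KiB per command would report that limit in
 * 512-byte sectors, and the soft max_sectors limit is then clamped to
 * BLK_DEF_MAX_SECTORS by blk_limits_max_hw_sectors():
 *
 *	blk_queue_max_hw_sectors(q, 1024);	(1024 sectors x 512 B = 512 KiB)
 */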

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
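
/*
 * Illustrative sketch only (not part of the original file): a 512e disk
 * (4 KiB physical sectors addressed as 512-byte logical blocks) would
 * typically be described like this, which also bumps io_min up to the
 * 4 KiB physical block size:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 */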

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
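
/*
 * Illustrative sketch only (not part of the original file): a RAID-style
 * driver with a 64 KiB chunk striped across 4 data disks might advertise
 * the chunk as the minimum and the full stripe as the optimal I/O size
 * (the geometry here is hypothetical):
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */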

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) & (min(top, bottom) - 1)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
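
/*
 * Illustrative sketch only (not part of the original file): a DM/MD style
 * stacking driver typically starts from blk_set_stacking_limits() and then
 * folds each component device in.  The component structure, list head and
 * data_start field below are hypothetical.
 *
 *	struct queue_limits limits;
 *	struct mydrv_component *c;
 *
 *	blk_set_stacking_limits(&limits);
 *	list_for_each_entry(c, &components, list)
 *		if (bdev_stack_limits(&limits, c->bdev, c->data_start) < 0)
 *			pr_warn("%s: component misaligned\n", __func__);
 *
 *	... then copy the combined limits into the top queue ...
 */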

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:  physically contiguous buffer
 * @size:  size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
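
/*
 * Illustrative sketch only (not part of the original file): an ATAPI-style
 * driver would allocate a contiguous drain buffer once and register it
 * together with a predicate that flags the commands that may over-transfer.
 * The names mydrv_drain_needed and DRAIN_SIZE are hypothetical.
 *
 *	static int mydrv_drain_needed(struct request *rq)
 *	{
 *		return ... non-zero for commands that may over-transfer ...;
 *	}
 *
 *	buf = kmalloc(DRAIN_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	if (blk_queue_dma_drain(q, mydrv_drain_needed, buf, DRAIN_SIZE))
 *		... fewer than two segments available, give up ...
 */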

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:      the request queue for the device
 * @flush:  0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
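
/*
 * Illustrative sketch only (not part of the original file): a driver for a
 * device with a volatile write cache that also honours FUA writes would
 * advertise both capabilities, while a write-through device passes 0:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);	volatile cache, FUA supported
 *	blk_queue_flush(q, REQ_FLUSH);			volatile cache, no FUA
 *	blk_queue_flush(q, 0);				write-through, nothing to flush
 */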

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
        q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);