blk-settings.c

/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:   queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
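
/*
 * Example (illustrative sketch, not compiled): a driver could use a
 * prepare_request hook to build a command block before its request_fn
 * sees the request.  The my_* names below are hypothetical.
 */
#if 0
static int my_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	if (blk_fs_request(rq))
		my_build_cdb(rq);	/* e.g. fill rq->cmd from the sector range */

	return BLKPREP_OK;		/* or BLKPREP_DEFER / BLKPREP_KILL */
}

/* during queue setup: */
blk_queue_prep_rq(q, my_prep_rq_fn);
#endif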

/**
 * blk_queue_set_discard - set a discard_sectors function for queue
 * @q:   queue
 * @dfn: prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 *
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
	q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:    queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
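
/*
 * Example (illustrative sketch, not compiled): a stacking driver with a
 * fixed chunk size could register a merge_bvec_fn that only accepts bytes
 * up to the next chunk boundary, while still taking at least one page
 * into an empty bio as required above.  my_mergeable_bvec and the 64KiB
 * chunk size are hypothetical.
 */
#if 0
static int my_mergeable_bvec(struct request_queue *q,
			     struct bvec_merge_data *bvm,
			     struct bio_vec *biovec)
{
	unsigned int chunk_bytes = 64 * 1024;
	unsigned int bio_bytes = bvm->bi_size;
	int max;

	/* bytes remaining before the bio would cross a chunk boundary */
	max = (chunk_bytes - ((bvm->bi_sector << 9) & (chunk_bytes - 1)))
		- bio_bytes;
	if (max < 0)
		max = 0;
	if (max <= biovec->bv_len && bio_bytes == 0)
		return biovec->bv_len;	/* must accept a page in an empty bio */

	return max;
}

blk_queue_merge_bvec(q, my_mergeable_bvec);
#endif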

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
	lim->max_hw_segments = MAX_HW_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_DEF_MAX_SECTORS;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:   the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;			/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
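
/*
 * Example (illustrative sketch, not compiled): a virtual device that just
 * remaps bios can bypass the request queue entirely by supplying its own
 * make_request function.  my_make_request, my_backing_bdev and
 * my_data_offset are hypothetical.
 */
#if 0
static int my_make_request(struct request_queue *q, struct bio *bio)
{
	/* redirect the bio to the backing device and resubmit it */
	bio->bi_bdev = my_backing_bdev;
	bio->bi_sector += my_data_offset;
	generic_make_request(bio);
	return 0;
}

q = blk_alloc_queue(GFP_KERNEL);
blk_queue_make_request(q, my_make_request);
#endif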

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
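
/*
 * Example (illustrative sketch, not compiled): a hypothetical PCI driver
 * whose hardware can only address the low 32 bits would restrict the
 * bounce limit accordingly, otherwise falling back to bouncing highmem
 * pages only.
 */
#if 0
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
else
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
#endif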

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
	else {
		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
		q->limits.max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);

void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
	else
		q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
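
/*
 * Example (illustrative sketch, not compiled): a driver typically applies
 * the request size and segment limits above while setting up its queue,
 * e.g. at probe time.  MY_MAX_SG is a hypothetical scatter-gather limit.
 */
#if 0
blk_queue_max_sectors(q, 256);			/* 128KiB per request */
blk_queue_max_phys_segments(q, MY_MAX_SG);
blk_queue_max_hw_segments(q, MY_MAX_SG);
blk_queue_max_segment_size(q, 64 * 1024);
#endif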

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
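
/*
 * Example (illustrative sketch, not compiled): a RAID-style device built
 * from drives with 4KiB physical sectors might export its I/O topology
 * like this, with io_min set to the chunk size and io_opt to the full
 * stripe width.  The sizes are hypothetical.
 */
#if 0
blk_queue_logical_block_size(q, 512);
blk_queue_physical_block_size(q, 4096);
blk_queue_io_min(q, 64 * 1024);		/* stripe chunk size */
blk_queue_io_opt(q, 4 * 64 * 1024);	/* stripe width across four disks */
#endif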

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top)
 * @b:  the underlying queue limits (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges two queue_limit structs.  Returns 0 if alignment didn't
 *    change.  Returns -1 if adding the bottom device caused
 *    misalignment.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t offset)
{
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_phys_segments = min_not_zero(t->max_phys_segments,
					    b->max_phys_segments);

	t->max_hw_segments = min_not_zero(t->max_hw_segments,
					  b->max_hw_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->no_cluster |= b->no_cluster;

	/* Bottom device offset aligned? */
	if (offset &&
	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
		t->misaligned = 1;
		return -1;
	}

	/* If top has no alignment offset, inherit from bottom */
	if (!t->alignment_offset)
		t->alignment_offset =
			b->alignment_offset & (b->physical_block_size - 1);

	/* Top device aligned on logical block boundary? */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		return -1;
	}

	/* Find lcm() of optimal I/O size */
	if (t->io_opt && b->io_opt)
		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
	else if (b->io_opt)
		t->io_opt = b->io_opt;

	/* Verify that optimal I/O size is a multiple of io_min */
	if (t->io_min && t->io_opt % t->io_min)
		return -1;

	return 0;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for two queues.  If adding the bottom device
 *    caused misalignment, a warning naming the top and bottom devices
 *    is printed.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	offset += get_start_sect(bdev) << 9;

	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
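
/*
 * Example (illustrative sketch, not compiled): a stacking driver folds
 * each component device's limits into its gendisk, passing the byte
 * offset of that component's data area (the partition start is added
 * above via get_start_sect()).  my_disk, comp and my_components are
 * hypothetical.
 */
#if 0
list_for_each_entry(comp, &my_components, list)
	disk_stack_limits(my_disk, comp->bdev, comp->data_offset_bytes);
#endif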

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:   physically contiguous buffer
 * @size:  size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
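
/*
 * Example (illustrative sketch, not compiled): an ATAPI-style driver could
 * reserve a drain buffer for packet commands so the HBA always has a DMA
 * element for excess transfer bytes.  my_drain_needed and MY_DRAIN_SIZE
 * are hypothetical.
 */
#if 0
static int my_drain_needed(struct request *rq)
{
	/* only non-fs (packet) commands need the drain here */
	return blk_pc_request(rq);
}

buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
if (buf)
	ret = blk_queue_dma_drain(q, my_drain_needed, buf, MY_DRAIN_SIZE);
#endif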

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
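
/*
 * Example (illustrative sketch, not compiled): several layers can each
 * request their own DMA alignment; the strictest (largest) mask wins.
 */
#if 0
blk_queue_update_dma_alignment(q, 3);	/* transport wants dword alignment */
blk_queue_update_dma_alignment(q, 511);	/* device wants sector alignment */
/* q->dma_alignment is now 511 */
#endif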

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);