@@ -91,10 +91,9 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
 	lim->max_discard_sectors = 0;
@@ -154,7 +153,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.data = (unsigned long)q;
 
 	blk_set_default_limits(&q->limits);
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
 	/*
 	 * If the caller didn't supply a lock, fall back to our embedded
@@ -210,37 +209,32 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_sectors - set max sectors for a request for this queue
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
  * @q: the request queue for the device
- * @max_sectors: max sectors in the usual 512b unit
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
  *
  * Description:
- *    Enables a low level driver to set an upper limit on the size of
- *    received requests.
+ *    Enables a low level driver to set a hard upper limit,
+ *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
+ *    the device driver based upon the combined capabilities of I/O
+ *    controller and storage device.
+ *
+ *    max_sectors is a soft limit imposed by the block layer for
+ *    filesystem type requests.  This value can be overridden on a
+ *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
+ *    The soft limit can not exceed max_hw_sectors.
 **/
-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
-	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_sectors);
+		       __func__, max_hw_sectors);
 	}
 
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
-	else {
-		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
-		q->limits.max_hw_sectors = max_sectors;
-	}
-}
-EXPORT_SYMBOL(blk_queue_max_sectors);
-
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
-{
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
-	else
-		q->limits.max_hw_sectors = max_sectors;
+	q->limits.max_hw_sectors = max_hw_sectors;
+	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
+				      BLK_DEF_MAX_SECTORS);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
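For illustration, a minimal sketch of how a low level driver might use the renamed helper after this change. The function name and the 2048-sector value are hypothetical; only blk_queue_max_hw_sectors() and the BLK_DEF_MAX_SECTORS cap come from the hunk above.

#include <linux/blkdev.h>

/* Hypothetical driver probe path; 2048 sectors (1 MB) is an arbitrary
 * example of what the controller and device can handle per request. */
static void example_set_hw_sector_limit(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);

	/* Afterwards q->limits.max_hw_sectors == 2048 and
	 * q->limits.max_sectors == min(2048, BLK_DEF_MAX_SECTORS),
	 * so the soft limit never exceeds the hard limit. */
}
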
@@ -257,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
 **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
@@ -275,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 		       __func__, max_segments);
 	}
 
-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
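With the two segment limits consolidated, a driver that previously advertised the physical and hardware segment counts separately now makes a single call. A hypothetical before/after sketch; the 128-segment value is invented.

#include <linux/blkdev.h>

/* Hypothetical driver init path. */
static void example_set_segment_limit(struct request_queue *q)
{
	/* Before this patch the driver would have called both
	 * blk_queue_max_phys_segments(q, 128) and
	 * blk_queue_max_hw_segments(q, 128). */

	/* After this patch there is one consolidated limit: */
	blk_queue_max_segments(q, 128);
}
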
@@ -536,11 +504,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
 
-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
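A sketch of the stacking behaviour for the consolidated limit, assuming a hypothetical top device t layered over a bottom device b; the values are invented, and min_not_zero() simply keeps the smaller non-zero limit.

#include <linux/blkdev.h>

/* Illustrative only: stacking keeps the stricter of the two limits. */
static void example_stack_segment_limits(struct queue_limits *t,
					 struct queue_limits *b)
{
	t->max_segments = 128;	/* top (stacking) device */
	b->max_segments = 64;	/* bottom device */

	blk_stack_limits(t, b, 0);

	/* min_not_zero(128, 64) leaves t->max_segments == 64. */
}
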
@@ -744,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support, otherwise there won't be room for the drain buffer.
 */
 int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
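To make the ordering note above concrete, a hypothetical sketch: the drain callback, buffer and the 32-segment figure are invented, while the calls are the ones touched by this hunk.

#include <linux/blkdev.h>

/* Hypothetical setup for a device that needs a drain buffer and whose
 * hardware supports 32 scatter/gather segments. */
static int example_setup_drain(struct request_queue *q,
			       dma_drain_needed_fn *drain_needed,
			       void *drain_buf, unsigned int drain_size)
{
	int ret;

	ret = blk_queue_dma_drain(q, drain_needed, drain_buf, drain_size);
	if (ret)
		return ret;	/* -EINVAL if fewer than 2 segments allowed */

	/* Any later call must leave room for the drain buffer:
	 * advertise one fewer segment than the hardware supports. */
	blk_queue_max_segments(q, 32 - 1);

	return 0;
}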