Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] Fixup cciss error handling
  [PATCH] Allow as-iosched to be unloaded
  [PATCH 2/2] cciss: remove calls to pci_disable_device
  [PATCH 1/2] cciss: map out more memory for config table
  [PATCH] Propagate down request sync flag

Resolve trivial whitespace conflict in drivers/block/cciss.c manually.
Author: Linus Torvalds
Commit: 5faad62026
4 changed files with 34 additions and 29 deletions:

 block/as-iosched.c    | +1  -14
 block/cfq-iosched.c   | +12 -6
 block/ll_rw_blk.c     | +20 -8
 drivers/block/cciss.c | +1  -1

block/as-iosched.c  +1 -14

@@ -1462,20 +1462,7 @@ static struct elevator_type iosched_as = {
 
 static int __init as_init(void)
 {
-	int ret;
-
-	ret = elv_register(&iosched_as);
-	if (!ret) {
-		/*
-		 * don't allow AS to get unregistered, since we would have
-		 * to browse all tasks in the system and release their
-		 * as_io_context first
-		 */
-		__module_get(THIS_MODULE);
-		return 0;
-	}
-
-	return ret;
+	return elv_register(&iosched_as);
 }
 
 static void __exit as_exit(void)
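
The removed branch used the usual self-pinning idiom: a module takes a reference on itself so its refcount can never drop to zero and rmmod always fails. A minimal sketch of that idiom, assuming only the elv_register()/THIS_MODULE calls visible in this hunk; the cleanup of lingering as_io_contexts that makes unloading safe happens outside it:

static int __init as_init_pinned(void)	/* hypothetical name: the old behaviour */
{
	int ret = elv_register(&iosched_as);

	if (!ret)
		__module_get(THIS_MODULE);	/* pin: refcount never reaches zero */
	return ret;
}

With the pin gone, the module's refcount can reach zero, so the __exit path above can actually run.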

block/cfq-iosched.c  +12 -6

@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue for read requests and synchronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 

block/ll_rw_blk.c  +20 -8

@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2983,11 +2986,20 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	}
 
 get_rq:
+	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
 	/*
 	 * Grab a free request. This might sleep but cannot fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
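
The invariant these hunks rely on is that the direction stays in bit 0 of rw_flags (hence the & 0x01 masks) while REQ_RW_SYNC rides along in a higher bit, so elv_may_queue() and blk_alloc_request() see the sync hint but rl->wait[], which has only two slots, is still indexed by direction alone. A condensed, runnable sketch of the packing and unpacking; the REQ_RW_SYNC bit position is an illustrative stand-in:

#include <stdio.h>

#define REQ_RW_SYNC	(1 << 1)	/* stand-in bit position */

int main(void)
{
	int sync = 1;			/* as returned by bio_sync(bio) */
	int rw_flags = 1;		/* bio_data_dir(bio): WRITE     */

	if (sync)
		rw_flags |= REQ_RW_SYNC; /* expose sync flag downstream */

	/* allocator and elevator get the full word; the wait queues
	 * get only the direction, masked back out */
	int rw = rw_flags & 0x01;

	printf("rw_flags=%#x direction=%d sync=%d\n",
	       rw_flags, rw, !!(rw_flags & REQ_RW_SYNC));
	return 0;
}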

drivers/block/cciss.c  +1 -1

@@ -3004,7 +3004,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	}
 	return 0;
 
-      err_out_free_res:
+err_out_free_res:
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo