@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
 	int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time. It will actually throttle on page writeback,
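The truncated comment above motivates the rework that follows: kthread_run() is kthread_create() plus wake_up_process(), and kthread_create() allocates memory and waits on kthreadd, so it can block for a long time. A paraphrase of the macro from include/linux/kthread.h, for reference only (not part of this patch):

#define kthread_run(threadfn, data, namefmt, ...)			\
({									\
	/* may sleep: allocates a task and waits on kthreadd */	\
	struct task_struct *__k						\
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						\
		wake_up_process(__k);					\
	__k;								\
})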
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
 	struct worker_start *start;
 	start = container_of(work, struct worker_start, work);
-	btrfs_start_workers(start->queue, 1);
+	__btrfs_start_workers(start->queue);
 	kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-	struct worker_start *start;
-	int ret;
-
-	start = kzalloc(sizeof(*start), GFP_NOFS);
-	if (!start)
-		return -ENOMEM;
-
-	start->work.func = start_new_worker_func;
-	start->queue = queue;
-	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-	if (ret)
-		kfree(start);
-	return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
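With start_new_worker() deleted, only the kthread-side callback survives here; the allocation half reappears inline in check_pending_worker_creates() in the next hunk. For reference while reading both hunks, the structure they hand off (defined near the top of async-thread.c) looks like this:

struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};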
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
 	struct btrfs_workers *workers = worker->workers;
+	struct worker_start *start;
 	unsigned long flags;
 
 	rmb();
 	if (!workers->atomic_start_pending)
 		return;
 
+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return;
+
+	start->work.func = start_new_worker_func;
+	start->queue = workers;
+
 	spin_lock_irqsave(&workers->lock, flags);
 	if (!workers->atomic_start_pending)
 		goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
 	workers->num_workers_starting += 1;
 	spin_unlock_irqrestore(&workers->lock, flags);
-	start_new_worker(workers);
+	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
 	return;
 
 out:
+	kfree(start);
 	spin_unlock_irqrestore(&workers->lock, flags);
 }
 
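check_pending_worker_creates() is split across the two hunks above; read together, the new body has the following shape. In this consolidated sketch (function name hypothetical, the limit checks from the elided context reduced to a comment), the GFP_NOFS allocation happens before the irq-saving spinlock because kzalloc() may sleep, and the out: path must free it when another worker wins the race:

static void pending_creates_sketch(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	/* may sleep, so allocate before taking workers->lock */
	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;			/* benign: a later pass retries */

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;		/* another worker beat us to it */
	workers->atomic_start_pending = 0;
	/* ... max-worker limit checks from the elided context ... */
	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;
out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}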
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads. This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-				 int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
-	int i;
 
-	for (i = 0; i < num_workers; i++) {
-		worker = kzalloc(sizeof(*worker), GFP_NOFS);
-		if (!worker) {
-			ret = -ENOMEM;
-			goto fail;
-		}
+	worker = kzalloc(sizeof(*worker), GFP_NOFS);
+	if (!worker) {
+		ret = -ENOMEM;
+		goto fail;
+	}
 
-		INIT_LIST_HEAD(&worker->pending);
-		INIT_LIST_HEAD(&worker->prio_pending);
-		INIT_LIST_HEAD(&worker->worker_list);
-		spin_lock_init(&worker->lock);
-
-		atomic_set(&worker->num_pending, 0);
-		atomic_set(&worker->refs, 1);
-		worker->workers = workers;
-		worker->task = kthread_run(worker_loop, worker,
-					   "btrfs-%s-%d", workers->name,
-					   workers->num_workers + i);
-		if (IS_ERR(worker->task)) {
-			ret = PTR_ERR(worker->task);
-			kfree(worker);
-			goto fail;
-		}
-		spin_lock_irq(&workers->lock);
-		list_add_tail(&worker->worker_list, &workers->idle_list);
-		worker->idle = 1;
-		workers->num_workers++;
-		workers->num_workers_starting--;
-		WARN_ON(workers->num_workers_starting < 0);
-		spin_unlock_irq(&workers->lock);
+	INIT_LIST_HEAD(&worker->pending);
+	INIT_LIST_HEAD(&worker->prio_pending);
+	INIT_LIST_HEAD(&worker->worker_list);
+	spin_lock_init(&worker->lock);
+
+	atomic_set(&worker->num_pending, 0);
+	atomic_set(&worker->refs, 1);
+	worker->workers = workers;
+	worker->task = kthread_run(worker_loop, worker,
+				   "btrfs-%s-%d", workers->name,
+				   workers->num_workers + 1);
+	if (IS_ERR(worker->task)) {
+		ret = PTR_ERR(worker->task);
+		kfree(worker);
+		goto fail;
 	}
+	spin_lock_irq(&workers->lock);
+	list_add_tail(&worker->worker_list, &workers->idle_list);
+	worker->idle = 1;
+	workers->num_workers++;
+	workers->num_workers_starting--;
+	WARN_ON(workers->num_workers_starting < 0);
+	spin_unlock_irq(&workers->lock);
+
 	return 0;
 fail:
-	btrfs_stop_workers(workers);
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting--;
+	spin_unlock_irq(&workers->lock);
 	return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
 	spin_lock_irq(&workers->lock);
-	workers->num_workers_starting += num_workers;
+	workers->num_workers_starting++;
 	spin_unlock_irq(&workers->lock);
-	return __btrfs_start_workers(workers, num_workers);
+	return __btrfs_start_workers(workers);
 }
 
 /*
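Two behavioral changes land in the hunk above: __btrfs_start_workers() now starts exactly one thread per call, and a failure no longer calls btrfs_stop_workers(), which used to tear down every worker in the pool because one spawn failed; instead the num_workers_starting accounting is reversed and the error is returned. That pushes responsibility onto the callers of btrfs_start_workers(). A hypothetical call-site shape (the real call sites live in disk-io.c):

/* hypothetical caller; shown only to illustrate the new contract */
static int start_one_pool(struct btrfs_workers *workers)
{
	int ret;

	ret = btrfs_start_workers(workers);	/* starts exactly one thread */
	if (ret)
		return ret;	/* pool left intact; caller handles teardown */
	return 0;
}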
@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
 	struct list_head *fallback;
+	int ret;
 
 again:
 	spin_lock_irqsave(&workers->lock, flags);
@@ -584,7 +578,9 @@ again:
 			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			__btrfs_start_workers(workers, 1);
+			ret = __btrfs_start_workers(workers);
+			if (ret)
+				goto fallback;
 			goto again;
 		}
 	}
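Previously the return value of __btrfs_start_workers() was ignored here, so a failed spawn left find_worker() looping on goto again with no new worker ever arriving. Now a failure drops through to find_worker()'s existing fallback: label, which hands back an already-running thread. A simplified control-flow sketch (helper names hypothetical, locking omitted):

static struct btrfs_worker_thread *
find_worker_sketch(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret;

again:
	worker = next_idle_worker(workers);	/* hypothetical helper */
	if (worker)
		return worker;

	if (workers->num_workers + workers->num_workers_starting <
	    workers->max_workers) {
		workers->num_workers_starting++;
		ret = __btrfs_start_workers(workers);
		if (ret)
			goto fallback;	/* e.g. -ENOMEM: don't spin forever */
		goto again;
	}
fallback:
	/* the real fallback: label picks a busy, already-running thread */
	return pick_existing_worker(workers);	/* hypothetical helper */
}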
@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
 	/* don't requeue something already on a list */
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-		goto out;
+		return;
 
 	worker = find_worker(workers);
 	if (workers->ordered) {
@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	if (wake)
 		wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-	return 0;
 }
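With the fallback above guaranteeing that find_worker() always returns a usable thread, btrfs_queue_worker() has no failure path left, so its int return and the out: label are dropped. The declarations in async-thread.h presumably change to match; a sketch of the assumed prototypes:

void btrfs_queue_worker(struct btrfs_workers *workers,
			struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers);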