@@ -339,7 +339,7 @@ static int bch_allocator_thread(void *arg)
 			allocator_wait(ca, !fifo_full(&ca->free));
 
 			fifo_push(&ca->free, bucket);
-			closure_wake_up(&ca->set->bucket_wait);
+			wake_up(&ca->set->bucket_wait);
 		}
 
 		/*
@@ -365,16 +365,41 @@ static int bch_allocator_thread(void *arg)
 	}
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 {
-	long r = -1;
-again:
+	DEFINE_WAIT(w);
+	struct bucket *b;
+	long r;
+
+	/* fastpath */
+	if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+		fifo_pop(&ca->free, r);
+		goto out;
+	}
+
+	if (!wait)
+		return -1;
+
+	while (1) {
+		if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+			fifo_pop(&ca->free, r);
+			break;
+		}
+
+		prepare_to_wait(&ca->set->bucket_wait, &w,
+				TASK_UNINTERRUPTIBLE);
+
+		mutex_unlock(&ca->set->bucket_lock);
+		schedule();
+		mutex_lock(&ca->set->bucket_lock);
+	}
+
+	finish_wait(&ca->set->bucket_wait, &w);
+out:
 	wake_up_process(ca->alloc_thread);
 
-	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
-	    fifo_pop(&ca->free, r)) {
-		struct bucket *b = ca->buckets + r;
 #ifdef CONFIG_BCACHE_EDEBUG
+	{
 		size_t iter;
 		long i;
 
@@ -387,36 +412,23 @@ again:
 			BUG_ON(i == r);
 		fifo_for_each(i, &ca->unused, iter)
 			BUG_ON(i == r);
-#endif
-		BUG_ON(atomic_read(&b->pin) != 1);
-
-		SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
-
-		if (watermark <= WATERMARK_METADATA) {
-			SET_GC_MARK(b, GC_MARK_METADATA);
-			b->prio = BTREE_PRIO;
-		} else {
-			SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
-			b->prio = INITIAL_PRIO;
-		}
-
-		return r;
 	}
+#endif
+	b = ca->buckets + r;
 
-	trace_bcache_alloc_fail(ca);
+	BUG_ON(atomic_read(&b->pin) != 1);
 
-	if (cl) {
-		closure_wait(&ca->set->bucket_wait, cl);
+	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
 
-		if (closure_blocking(cl)) {
-			mutex_unlock(&ca->set->bucket_lock);
-			closure_sync(cl);
-			mutex_lock(&ca->set->bucket_lock);
-			goto again;
-		}
+	if (watermark <= WATERMARK_METADATA) {
+		SET_GC_MARK(b, GC_MARK_METADATA);
+		b->prio = BTREE_PRIO;
+	} else {
+		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		b->prio = INITIAL_PRIO;
 	}
 
-	return -1;
+	return r;
 }
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -433,7 +445,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-			   struct bkey *k, int n, struct closure *cl)
+			   struct bkey *k, int n, bool wait)
 {
 	int i;
 
@@ -446,7 +458,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
 	for (i = 0; i < n; i++) {
 		struct cache *ca = c->cache_by_alloc[i];
-		long b = bch_bucket_alloc(ca, watermark, cl);
+		long b = bch_bucket_alloc(ca, watermark, wait);
 
 		if (b == -1)
 			goto err;
@@ -466,11 +478,11 @@ err:
 }
 
 int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-			 struct bkey *k, int n, struct closure *cl)
+			 struct bkey *k, int n, bool wait)
 {
 	int ret;
 	mutex_lock(&c->bucket_lock);
-	ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+	ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
 	mutex_unlock(&c->bucket_lock);
 	return ret;
 }
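Note on the API change above: bch_bucket_alloc() now takes a plain bool wait instead of a struct closure *, and sleeps on ca->set->bucket_wait itself instead of handing the caller a closure to wait on. The sketch below shows roughly what that means on the caller side; it is illustrative only and not part of the patch. It assumes the existing WATERMARK_PRIO watermark and the existing rule that ca->set->bucket_lock is held around the call (the new code drops and retakes that mutex around schedule() while it waits).

	/*
	 * Illustrative caller, not from this patch: try a non-blocking
	 * allocation first, then fall back to a blocking one.  The caller
	 * holds ca->set->bucket_lock; with wait == true the function may
	 * drop and retake it while sleeping on bucket_wait.
	 */
	long bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, false);
	if (bucket == -1) {
		/* Sleeps until the allocator thread pushes buckets onto
		 * ca->free and calls wake_up(&ca->set->bucket_wait). */
		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
	}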