@@ -251,6 +251,20 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
+void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+		       u64 nodeid, u64 nlookup)
+{
+	forget->forget_one.nodeid = nodeid;
+	forget->forget_one.nlookup = nlookup;
+
+	spin_lock(&fc->lock);
+	fc->forget_list_tail->next = forget;
+	fc->forget_list_tail = forget;
+	wake_up(&fc->waitq);
+	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	spin_unlock(&fc->lock);
+}
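
The forget list queued above is a singly linked FIFO with a dummy head embedded in struct fuse_conn: fc->forget_list_tail always points at the last element, or at &fc->forget_list_head when the queue is empty, so the enqueue needs no empty-list special case (the kernel side also relies on the link being allocated zeroed before it is queued). A minimal standalone sketch of the same idiom, in userspace C with illustrative names only, not the kernel code:

#include <stdio.h>
#include <stdlib.h>

struct link {
	struct link *next;
	unsigned long id;
};

/* Dummy head element; tail points back at it while the queue is empty. */
static struct link queue_head = { .next = NULL };
static struct link *queue_tail = &queue_head;

static void enqueue(struct link *l)
{
	l->next = NULL;
	queue_tail->next = l;	/* valid even when the queue is empty */
	queue_tail = l;
}

int main(void)
{
	for (unsigned long i = 1; i <= 3; i++) {
		struct link *l = malloc(sizeof(*l));
		l->id = i;
		enqueue(l);
	}
	struct link *l = queue_head.next;
	while (l) {
		struct link *next = l->next;
		printf("%lu\n", l->id);	/* prints 1 2 3: FIFO order */
		free(l);
		l = next;
	}
	return 0;
}
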
+
 static void flush_bg_queue(struct fuse_conn *fc)
 {
 	while (fc->active_background < fc->max_background &&
@@ -438,12 +452,6 @@ static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 	}
 }
 
-void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->isreply = 0;
-	fuse_request_send_nowait(fc, req);
-}
-
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
@@ -896,9 +904,15 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
 	return err;
 }
 
+static int forget_pending(struct fuse_conn *fc)
+{
+	return fc->forget_list_head.next != NULL;
+}
+
 static int request_pending(struct fuse_conn *fc)
 {
-	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
+	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
+		forget_pending(fc);
 }
 
 /* Wait until a request is available on the pending list */
@@ -960,6 +974,120 @@ __releases(fc->lock)
 	return err ? err : reqsize;
 }
 
+static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
+                                               unsigned max,
+                                               unsigned *countp)
+{
+	struct fuse_forget_link *head = fc->forget_list_head.next;
+	struct fuse_forget_link **newhead = &head;
+	unsigned count;
+
+	for (count = 0; *newhead != NULL && count < max; count++)
+		newhead = &(*newhead)->next;
+
+	fc->forget_list_head.next = *newhead;
+	*newhead = NULL;
+	if (fc->forget_list_head.next == NULL)
+		fc->forget_list_tail = &fc->forget_list_head;
+
+	if (countp != NULL)
+		*countp = count;
+
+	return head;
+}
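
dequeue_forget() detaches up to max entries from the front of the list in a single O(max) pass: newhead walks as a pointer-to-pointer until it addresses the next field of the last entry to hand out, so splitting the chain is just two stores, and the tail pointer is reset only when the queue drained completely. Continuing the toy sketch from above (illustrative, not the kernel code):

/* Detach up to max entries from the front of the toy queue above,
 * mirroring the kernel's pointer-to-pointer walk. */
static struct link *dequeue_some(unsigned max, unsigned *countp)
{
	struct link *first = queue_head.next;
	struct link **newhead = &first;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	queue_head.next = *newhead;	/* remainder stays queued */
	*newhead = NULL;		/* terminate the detached chain */
	if (queue_head.next == NULL)
		queue_tail = &queue_head;	/* drained: reset tail */

	if (countp != NULL)
		*countp = count;
	return first;
}
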
+
+static int fuse_read_single_forget(struct fuse_conn *fc,
+                                   struct fuse_copy_state *cs,
+                                   size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
+	struct fuse_forget_in arg = {
+		.nlookup = forget->forget_one.nlookup,
+	};
+	struct fuse_in_header ih = {
+		.opcode = FUSE_FORGET,
+		.nodeid = forget->forget_one.nodeid,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	spin_unlock(&fc->lock);
+	kfree(forget);
+	if (nbytes < ih.len)
+		return -EINVAL;
+
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
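
On the wire a single forget is just a struct fuse_in_header followed by a struct fuse_forget_in: the nodeid travels in the header and only the nlookup count in the body, and the kernel expects no reply. As a hedged illustration (not part of this patch), a userspace server might decode it from a /dev/fuse read buffer roughly like this, assuming the userspace <linux/fuse.h> and with error handling trimmed:

#include <linux/fuse.h>
#include <stdio.h>
#include <string.h>

/* Illustrative decoder; buf/len come from one read(2) on /dev/fuse. */
static void handle_forget(const char *buf, size_t len)
{
	struct fuse_in_header ih;
	struct fuse_forget_in arg;

	if (len < sizeof(ih) + sizeof(arg))
		return;
	memcpy(&ih, buf, sizeof(ih));
	memcpy(&arg, buf + sizeof(ih), sizeof(arg));
	if (ih.opcode != FUSE_FORGET)
		return;
	/* FUSE_FORGET carries no reply; just drop the lookup refs. */
	printf("forget nodeid=%llu nlookup=%llu\n",
	       (unsigned long long)ih.nodeid,
	       (unsigned long long)arg.nlookup);
}
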
+
+static int fuse_read_batch_forget(struct fuse_conn *fc,
+                                  struct fuse_copy_state *cs, size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	unsigned max_forgets;
+	unsigned count;
+	struct fuse_forget_link *head;
+	struct fuse_batch_forget_in arg = { .count = 0 };
+	struct fuse_in_header ih = {
+		.opcode = FUSE_BATCH_FORGET,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	if (nbytes < ih.len) {
+		spin_unlock(&fc->lock);
+		return -EINVAL;
+	}
+
+	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
+	head = dequeue_forget(fc, max_forgets, &count);
+	spin_unlock(&fc->lock);
+
+	arg.count = count;
+	ih.len += count * sizeof(struct fuse_forget_one);
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+
+	while (head) {
+		struct fuse_forget_link *forget = head;
+
+		if (!err) {
+			err = fuse_copy_one(cs, &forget->forget_one,
+					    sizeof(forget->forget_one));
+		}
+		head = forget->next;
+		kfree(forget);
+	}
+
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
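
The batch variant packs one header, a struct fuse_batch_forget_in carrying the count, and then count packed struct fuse_forget_one entries, which is why max_forgets above is derived from the reader's buffer size. A matching userspace decode sketch, with the same assumptions and caveats as the single-forget example:

/* Illustrative FUSE_BATCH_FORGET decoder, companion to handle_forget(). */
static void handle_batch_forget(const char *buf, size_t len)
{
	struct fuse_in_header ih;
	struct fuse_batch_forget_in arg;

	if (len < sizeof(ih) + sizeof(arg))
		return;
	memcpy(&ih, buf, sizeof(ih));
	memcpy(&arg, buf + sizeof(ih), sizeof(arg));
	if (ih.opcode != FUSE_BATCH_FORGET ||
	    len < sizeof(ih) + sizeof(arg) +
		  arg.count * sizeof(struct fuse_forget_one))
		return;
	buf += sizeof(ih) + sizeof(arg);
	for (unsigned i = 0; i < arg.count; i++) {
		struct fuse_forget_one one;

		memcpy(&one, buf + i * sizeof(one), sizeof(one));
		printf("forget nodeid=%llu nlookup=%llu\n",
		       (unsigned long long)one.nodeid,
		       (unsigned long long)one.nlookup);
	}
}
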
+
+static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
+                            size_t nbytes)
+__releases(fc->lock)
+{
+	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
+		return fuse_read_single_forget(fc, cs, nbytes);
+	else
+		return fuse_read_batch_forget(fc, cs, nbytes);
+}
+
 /*
  * Read a single request into the userspace filesystem's buffer.  This
  * function waits until a request is available, then removes it from
@@ -998,6 +1126,14 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 		return fuse_read_interrupt(fc, cs, nbytes, req);
 	}
 
+	if (forget_pending(fc)) {
+		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
+			return fuse_read_forget(fc, cs, nbytes);
+
+		if (fc->forget_batch <= -8)
+			fc->forget_batch = 16;
+	}
+
 	req = list_entry(fc->pending.next, struct fuse_req, list);
 	req->state = FUSE_REQ_READING;
 	list_move(&req->list, &fc->io);
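
The forget_batch counter above is a starvation guard: when regular requests are also pending, each read decrements it, so the device hands out a burst of forgets while the counter is positive and then lets regular requests through until it sinks to -8 and is rearmed to 16. A toy simulation of the steady state, assuming both queues stay permanently non-empty (illustrative only; in struct fuse_conn the counter starts at zero):

#include <stdio.h>

/* Prints the interleaving the heuristic settles into: bursts of 16
 * forgets (F) separated by nine regular requests (R). */
int main(void)
{
	int forget_batch = 0;

	for (int i = 0; i < 75; i++) {
		if (forget_batch-- > 0) {
			putchar('F');	/* fuse_read_forget() path */
			continue;
		}
		if (forget_batch <= -8)
			forget_batch = 16;
		putchar('R');		/* regular pending request */
	}
	putchar('\n');
	return 0;
}
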
@@ -1090,7 +1226,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
@@ -1626,7 +1762,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
@@ -1770,6 +1906,8 @@ __acquires(fc->lock)
 	flush_bg_queue(fc);
 	end_requests(fc, &fc->pending);
 	end_requests(fc, &fc->processing);
+	while (forget_pending(fc))
+		kfree(dequeue_forget(fc, 1, NULL));
 }
 
 /*