@@ -71,6 +71,36 @@ static void send_op(struct plock_op *op)
 	wake_up(&send_wq);
 }
 
+/* If a process was killed while waiting for the only plock on a file,
+   locks_remove_posix will not see any lock on the file so it won't
+   send an unlock-close to us to pass on to userspace to clean up the
+   abandoned waiter.  So, we have to insert the unlock-close when the
+   lock call is interrupted. */
+
+static void do_unlock_close(struct dlm_ls *ls, u64 number,
+			    struct file *file, struct file_lock *fl)
+{
+	struct plock_op *op;
+
+	op = kzalloc(sizeof(*op), GFP_NOFS);
+	if (!op)
+		return;
+
+	op->info.optype = DLM_PLOCK_OP_UNLOCK;
+	op->info.pid = fl->fl_pid;
+	op->info.fsid = ls->ls_global_id;
+	op->info.number = number;
+	op->info.start = 0;
+	op->info.end = OFFSET_MAX;
+	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
+		op->info.owner = (__u64) fl->fl_pid;
+	else
+		op->info.owner = (__u64)(long) fl->fl_owner;
+
+	op->info.flags |= DLM_PLOCK_FL_CLOSE;
+	send_op(op);
+}
+
 int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 		   int cmd, struct file_lock *fl)
 {
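
The new do_unlock_close() path covers a case that is easy to reproduce from userspace. A minimal sketch of that scenario follows; it is illustrative only and not part of the patch, and it assumes a dlm-backed filesystem (e.g. gfs2) mounted at a hypothetical path. The parent takes the only plock on the file, the child blocks in F_SETLKW as the sole waiter, and the child is then killed; before this change, locks_remove_posix() would find no lock on close, so no unlock-close would reach userspace to clean up the abandoned waiter.

/* Illustrative userspace sketch (not part of the patch).  The mount
 * point and file name are hypothetical. */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,			/* whole file */
	};
	int fd = open("/mnt/gfs2/lockfile", O_RDWR | O_CREAT, 0644);
	pid_t child;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fcntl(fd, F_SETLK, &fl) < 0) {	/* parent holds the plock */
		perror("F_SETLK");
		return 1;
	}

	child = fork();
	if (child < 0) {
		perror("fork");
		return 1;
	}
	if (child == 0) {
		/* child: conflicts with the parent's lock and blocks in
		 * dlm_posix_lock() as the only waiter */
		fcntl(fd, F_SETLKW, &fl);
		_exit(0);
	}

	sleep(1);		/* let the child reach the wait */
	kill(child, SIGKILL);	/* with this patch, the interrupted lock
				 * call queues the unlock-close itself */
	waitpid(child, NULL, 0);
	close(fd);
	return 0;
}
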
@@ -114,9 +144,19 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 
 	send_op(op);
 
-	if (xop->callback == NULL)
-		wait_event(recv_wq, (op->done != 0));
-	else {
+	if (xop->callback == NULL) {
+		rv = wait_event_killable(recv_wq, (op->done != 0));
+		if (rv == -ERESTARTSYS) {
+			log_debug(ls, "dlm_posix_lock: wait killed %llx",
+				  (unsigned long long)number);
+			spin_lock(&ops_lock);
+			list_del(&op->list);
+			spin_unlock(&ops_lock);
+			kfree(xop);
+			do_unlock_close(ls, number, file, fl);
+			goto out;
+		}
+	} else {
 		rv = FILE_LOCK_DEFERRED;
 		goto out;
 	}
@@ -233,6 +273,13 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 	else
 		op->info.owner = (__u64)(long) fl->fl_owner;
 
+	if (fl->fl_flags & FL_CLOSE) {
+		op->info.flags |= DLM_PLOCK_FL_CLOSE;
+		send_op(op);
+		rv = 0;
+		goto out;
+	}
+
 	send_op(op);
 	wait_event(recv_wq, (op->done != 0));
 
@@ -334,7 +381,10 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 	spin_lock(&ops_lock);
 	if (!list_empty(&send_list)) {
 		op = list_entry(send_list.next, struct plock_op, list);
-		list_move(&op->list, &recv_list);
+		if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+			list_del(&op->list);
+		else
+			list_move(&op->list, &recv_list);
 		memcpy(&info, &op->info, sizeof(info));
 	}
 	spin_unlock(&ops_lock);
@@ -342,6 +392,13 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 	if (!op)
 		return -EAGAIN;
 
+	/* there is no need to get a reply from userspace for unlocks
+	   that were generated by the vfs cleaning up for a close
+	   (the process did not make an unlock call). */
+
+	if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+		kfree(op);
+
 	if (copy_to_user(u, &info, sizeof(info)))
 		return -EFAULT;
 	return sizeof(info);
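
Because dev_read() above frees a DLM_PLOCK_FL_CLOSE op immediately instead of moving it to recv_list, the kernel does not expect a reply from userspace for these operations; only the info copy already made under ops_lock is handed up. A hypothetical sketch of what the matching read loop on the userspace side could look like follows; the function name, descriptor handling and loop structure are illustrative and are not the real dlm_controld code.

/* Hypothetical userspace read loop (illustrative only).  dev_fd is
 * assumed to be open on the dlm plock misc device. */
#include <stdio.h>
#include <unistd.h>
#include <linux/dlm_plock.h>

static void plock_device_loop(int dev_fd)
{
	struct dlm_plock_info info;

	while (read(dev_fd, &info, sizeof(info)) == sizeof(info)) {
		/* ... resolve the lock or unlock against local plock
		 * state here ... */

		if (info.flags & DLM_PLOCK_FL_CLOSE)
			continue;	/* kernel expects no reply */

		/* normal case: write the result back to the kernel */
		if (write(dev_fd, &info, sizeof(info)) != sizeof(info))
			perror("plock reply");
	}
}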