plock.c 9.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439
  1. /*
  2. * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
  3. *
  4. * This copyrighted material is made available to anyone wishing to use,
  5. * modify, copy, or redistribute it subject to the terms and conditions
  6. * of the GNU General Public License version 2.
  7. */
  8. #include <linux/fs.h>
  9. #include <linux/miscdevice.h>
  10. #include <linux/poll.h>
  11. #include <linux/dlm.h>
  12. #include <linux/dlm_plock.h>
  13. #include "dlm_internal.h"
  14. #include "lockspace.h"
/* Protects send_list and recv_list. */
static spinlock_t ops_lock;
/* Ops queued for userspace to pick up via dev_read(). */
static struct list_head send_list;
/* Ops handed to userspace, awaiting a result via dev_write(). */
static struct list_head recv_list;
/* Wakes device pollers/readers when an op lands on send_list. */
static wait_queue_head_t send_wq;
/* Wakes synchronous requesters when their op completes (op->done). */
static wait_queue_head_t recv_wq;
/*
 * One posix lock request/result exchanged with the userspace cluster
 * lock manager through the dlm_plock misc device.
 */
struct plock_op {
	struct list_head list;	/* on send_list or recv_list, under ops_lock */
	int done;		/* set by dev_write() when the result arrives */
	struct dlm_plock_info info;	/* request out, result back in */
};
/*
 * Extended op for asynchronous (lockd/NFS) lock requests.  The embedded
 * plock_op must be the first member so a plock_op pointer can be cast
 * back to plock_xop in the callback path.
 */
struct plock_xop {
	struct plock_op xop;
	void *callback;		/* fl_lmops->fl_grant; NULL for sync requests */
	void *fl;		/* caller's original file_lock */
	void *file;		/* file the lock applies to */
	struct file_lock flc;	/* private copy used for local VFS bookkeeping */
};
/* Stamp the plock device protocol version into an outgoing request. */
static inline void set_version(struct dlm_plock_info *info)
{
	info->version[0] = DLM_PLOCK_VERSION_MAJOR;
	info->version[1] = DLM_PLOCK_VERSION_MINOR;
	info->version[2] = DLM_PLOCK_VERSION_PATCH;
}
  38. static int check_version(struct dlm_plock_info *info)
  39. {
  40. if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
  41. (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
  42. log_print("plock device version mismatch: "
  43. "kernel (%u.%u.%u), user (%u.%u.%u)",
  44. DLM_PLOCK_VERSION_MAJOR,
  45. DLM_PLOCK_VERSION_MINOR,
  46. DLM_PLOCK_VERSION_PATCH,
  47. info->version[0],
  48. info->version[1],
  49. info->version[2]);
  50. return -EINVAL;
  51. }
  52. return 0;
  53. }
/*
 * Publish an op to userspace: stamp the protocol version, append the op
 * to send_list under ops_lock, then wake any poller/reader of the
 * device.  After this returns the op is visible to userspace and may
 * complete (or be freed by the callback path) at any time.
 */
static void send_op(struct plock_op *op)
{
	set_version(&op->info);
	INIT_LIST_HEAD(&op->list);
	spin_lock(&ops_lock);
	list_add_tail(&op->list, &send_list);
	spin_unlock(&ops_lock);
	wake_up(&send_wq);
}
  63. int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
  64. int cmd, struct file_lock *fl)
  65. {
  66. struct dlm_ls *ls;
  67. struct plock_op *op;
  68. struct plock_xop *xop;
  69. int rv;
  70. ls = dlm_find_lockspace_local(lockspace);
  71. if (!ls)
  72. return -EINVAL;
  73. xop = kzalloc(sizeof(*xop), GFP_KERNEL);
  74. if (!xop) {
  75. rv = -ENOMEM;
  76. goto out;
  77. }
  78. op = &xop->xop;
  79. op->info.optype = DLM_PLOCK_OP_LOCK;
  80. op->info.pid = fl->fl_pid;
  81. op->info.ex = (fl->fl_type == F_WRLCK);
  82. op->info.wait = IS_SETLKW(cmd);
  83. op->info.fsid = ls->ls_global_id;
  84. op->info.number = number;
  85. op->info.start = fl->fl_start;
  86. op->info.end = fl->fl_end;
  87. if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
  88. /* fl_owner is lockd which doesn't distinguish
  89. processes on the nfs client */
  90. op->info.owner = (__u64) fl->fl_pid;
  91. xop->callback = fl->fl_lmops->fl_grant;
  92. locks_init_lock(&xop->flc);
  93. locks_copy_lock(&xop->flc, fl);
  94. xop->fl = fl;
  95. xop->file = file;
  96. } else {
  97. op->info.owner = (__u64)(long) fl->fl_owner;
  98. xop->callback = NULL;
  99. }
  100. send_op(op);
  101. if (xop->callback == NULL)
  102. wait_event(recv_wq, (op->done != 0));
  103. else {
  104. rv = FILE_LOCK_DEFERRED;
  105. goto out;
  106. }
  107. spin_lock(&ops_lock);
  108. if (!list_empty(&op->list)) {
  109. log_error(ls, "dlm_posix_lock: op on list %llx",
  110. (unsigned long long)number);
  111. list_del(&op->list);
  112. }
  113. spin_unlock(&ops_lock);
  114. rv = op->info.rv;
  115. if (!rv) {
  116. if (posix_lock_file_wait(file, fl) < 0)
  117. log_error(ls, "dlm_posix_lock: vfs lock error %llx",
  118. (unsigned long long)number);
  119. }
  120. kfree(xop);
  121. out:
  122. dlm_put_lockspace(ls);
  123. return rv;
  124. }
  125. EXPORT_SYMBOL_GPL(dlm_posix_lock);
/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(void *, void *, int) = NULL;
	/* Only lock ops reach here, so the op is embedded in a plock_xop. */
	struct plock_xop *xop = (struct plock_xop *)op;
	int rv = 0;

	/* dev_write() normally removed the op; warn and recover if not. */
	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_print("dlm_plock_callback: op on list %llx",
			  (unsigned long long)op->info.number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* check if the following 2 are still valid or make a copy */
	file = xop->file;
	flc = &xop->flc;
	fl = xop->fl;
	notify = xop->callback;

	/* Userspace refused the lock; report the error to lockd. */
	if (op->info.rv) {
		notify(flc, NULL, op->info.rv);
		goto out;
	}

	/* got fs lock; bookkeep locally as well: */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/*
		 * This can only happen in the case of kmalloc() failure.
		 * The filesystem's own lock is the authoritative lock,
		 * so a failure to get the lock locally is not a disaster.
		 * As long as the fs cannot reliably cancel locks (especially
		 * in a low-memory situation), we're better off ignoring
		 * this failure than trying to recover.
		 */
		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
			  (unsigned long long)op->info.number, file, fl);
	}

	rv = notify(flc, NULL, 0);
	if (rv) {
		/* XXX: We need to cancel the fs lock here: */
		log_print("dlm_plock_callback: lock granted after lock request "
			  "failed; dangling lock!\n");
		goto out;
	}

out:
	kfree(xop);
	return rv;
}
  176. int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
  177. struct file_lock *fl)
  178. {
  179. struct dlm_ls *ls;
  180. struct plock_op *op;
  181. int rv;
  182. ls = dlm_find_lockspace_local(lockspace);
  183. if (!ls)
  184. return -EINVAL;
  185. op = kzalloc(sizeof(*op), GFP_KERNEL);
  186. if (!op) {
  187. rv = -ENOMEM;
  188. goto out;
  189. }
  190. if (posix_lock_file_wait(file, fl) < 0)
  191. log_error(ls, "dlm_posix_unlock: vfs unlock error %llx",
  192. (unsigned long long)number);
  193. op->info.optype = DLM_PLOCK_OP_UNLOCK;
  194. op->info.pid = fl->fl_pid;
  195. op->info.fsid = ls->ls_global_id;
  196. op->info.number = number;
  197. op->info.start = fl->fl_start;
  198. op->info.end = fl->fl_end;
  199. if (fl->fl_lmops && fl->fl_lmops->fl_grant)
  200. op->info.owner = (__u64) fl->fl_pid;
  201. else
  202. op->info.owner = (__u64)(long) fl->fl_owner;
  203. send_op(op);
  204. wait_event(recv_wq, (op->done != 0));
  205. spin_lock(&ops_lock);
  206. if (!list_empty(&op->list)) {
  207. log_error(ls, "dlm_posix_unlock: op on list %llx",
  208. (unsigned long long)number);
  209. list_del(&op->list);
  210. }
  211. spin_unlock(&ops_lock);
  212. rv = op->info.rv;
  213. if (rv == -ENOENT)
  214. rv = 0;
  215. kfree(op);
  216. out:
  217. dlm_put_lockspace(ls);
  218. return rv;
  219. }
  220. EXPORT_SYMBOL_GPL(dlm_posix_unlock);
  221. int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
  222. struct file_lock *fl)
  223. {
  224. struct dlm_ls *ls;
  225. struct plock_op *op;
  226. int rv;
  227. ls = dlm_find_lockspace_local(lockspace);
  228. if (!ls)
  229. return -EINVAL;
  230. op = kzalloc(sizeof(*op), GFP_KERNEL);
  231. if (!op) {
  232. rv = -ENOMEM;
  233. goto out;
  234. }
  235. op->info.optype = DLM_PLOCK_OP_GET;
  236. op->info.pid = fl->fl_pid;
  237. op->info.ex = (fl->fl_type == F_WRLCK);
  238. op->info.fsid = ls->ls_global_id;
  239. op->info.number = number;
  240. op->info.start = fl->fl_start;
  241. op->info.end = fl->fl_end;
  242. if (fl->fl_lmops && fl->fl_lmops->fl_grant)
  243. op->info.owner = (__u64) fl->fl_pid;
  244. else
  245. op->info.owner = (__u64)(long) fl->fl_owner;
  246. send_op(op);
  247. wait_event(recv_wq, (op->done != 0));
  248. spin_lock(&ops_lock);
  249. if (!list_empty(&op->list)) {
  250. log_error(ls, "dlm_posix_get: op on list %llx",
  251. (unsigned long long)number);
  252. list_del(&op->list);
  253. }
  254. spin_unlock(&ops_lock);
  255. /* info.rv from userspace is 1 for conflict, 0 for no-conflict,
  256. -ENOENT if there are no locks on the file */
  257. rv = op->info.rv;
  258. fl->fl_type = F_UNLCK;
  259. if (rv == -ENOENT)
  260. rv = 0;
  261. else if (rv > 0) {
  262. fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
  263. fl->fl_pid = op->info.pid;
  264. fl->fl_start = op->info.start;
  265. fl->fl_end = op->info.end;
  266. rv = 0;
  267. }
  268. kfree(op);
  269. out:
  270. dlm_put_lockspace(ls);
  271. return rv;
  272. }
  273. EXPORT_SYMBOL_GPL(dlm_posix_get);
  274. /* a read copies out one plock request from the send list */
  275. static ssize_t dev_read(struct file *file, char __user *u, size_t count,
  276. loff_t *ppos)
  277. {
  278. struct dlm_plock_info info;
  279. struct plock_op *op = NULL;
  280. if (count < sizeof(info))
  281. return -EINVAL;
  282. spin_lock(&ops_lock);
  283. if (!list_empty(&send_list)) {
  284. op = list_entry(send_list.next, struct plock_op, list);
  285. list_move(&op->list, &recv_list);
  286. memcpy(&info, &op->info, sizeof(info));
  287. }
  288. spin_unlock(&ops_lock);
  289. if (!op)
  290. return -EAGAIN;
  291. if (copy_to_user(u, &info, sizeof(info)))
  292. return -EFAULT;
  293. return sizeof(info);
  294. }
  295. /* a write copies in one plock result that should match a plock_op
  296. on the recv list */
  297. static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
  298. loff_t *ppos)
  299. {
  300. struct dlm_plock_info info;
  301. struct plock_op *op;
  302. int found = 0;
  303. if (count != sizeof(info))
  304. return -EINVAL;
  305. if (copy_from_user(&info, u, sizeof(info)))
  306. return -EFAULT;
  307. if (check_version(&info))
  308. return -EINVAL;
  309. spin_lock(&ops_lock);
  310. list_for_each_entry(op, &recv_list, list) {
  311. if (op->info.fsid == info.fsid && op->info.number == info.number &&
  312. op->info.owner == info.owner) {
  313. list_del_init(&op->list);
  314. found = 1;
  315. op->done = 1;
  316. memcpy(&op->info, &info, sizeof(info));
  317. break;
  318. }
  319. }
  320. spin_unlock(&ops_lock);
  321. if (found) {
  322. struct plock_xop *xop;
  323. xop = (struct plock_xop *)op;
  324. if (xop->callback)
  325. dlm_plock_callback(op);
  326. else
  327. wake_up(&recv_wq);
  328. } else
  329. log_print("dev_write no op %x %llx", info.fsid,
  330. (unsigned long long)info.number);
  331. return count;
  332. }
  333. static unsigned int dev_poll(struct file *file, poll_table *wait)
  334. {
  335. unsigned int mask = 0;
  336. poll_wait(file, &send_wq, wait);
  337. spin_lock(&ops_lock);
  338. if (!list_empty(&send_list))
  339. mask = POLLIN | POLLRDNORM;
  340. spin_unlock(&ops_lock);
  341. return mask;
  342. }
/* File operations for the dlm_plock misc device. */
static const struct file_operations dev_fops = {
	.read = dev_read,
	.write = dev_write,
	.poll = dev_poll,
	.owner = THIS_MODULE
};
/* Dynamic-minor misc device userspace uses to service plock requests. */
static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};
  354. int dlm_plock_init(void)
  355. {
  356. int rv;
  357. spin_lock_init(&ops_lock);
  358. INIT_LIST_HEAD(&send_list);
  359. INIT_LIST_HEAD(&recv_list);
  360. init_waitqueue_head(&send_wq);
  361. init_waitqueue_head(&recv_wq);
  362. rv = misc_register(&plock_dev_misc);
  363. if (rv)
  364. log_print("dlm_plock_init: misc_register failed %d", rv);
  365. return rv;
  366. }
  367. void dlm_plock_exit(void)
  368. {
  369. if (misc_deregister(&plock_dev_misc) < 0)
  370. log_print("dlm_plock_exit: misc_deregister failed");
  371. }