  1. /* CacheFiles path walking and related routines
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/sched.h>
  13. #include <linux/file.h>
  14. #include <linux/fs.h>
  15. #include <linux/fsnotify.h>
  16. #include <linux/quotaops.h>
  17. #include <linux/xattr.h>
  18. #include <linux/mount.h>
  19. #include <linux/namei.h>
  20. #include <linux/security.h>
  21. #include <linux/slab.h>
  22. #include "internal.h"
  23. #define CACHEFILES_KEYBUF_SIZE 512
/*
 * Dump debugging info about a single object to the kernel log, each line
 * prefixed with @prefix so paired dumps can be told apart.
 * - @keybuf may be NULL (e.g. if the caller's allocation failed), in which
 *   case the key is not dumped
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix,
				u8 *keybuf)
{
	struct fscache_cookie *cookie;
	unsigned keylen, loop;

	printk(KERN_ERR "%sobject: OBJ%x\n",
	       prefix, object->fscache.debug_id);
	printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
	       prefix, fscache_object_states[object->fscache.state],
	       object->fscache.flags, object->fscache.work.flags,
	       object->fscache.events,
	       object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	printk(KERN_ERR "%sparent=%p\n",
	       prefix, object->fscache.parent);

	/* hold the object lock whilst we dereference the cookie pointer */
	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		/* retrieve the netfs key only if the caller gave us a buffer */
		if (keybuf)
			keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
						      CACHEFILES_KEYBUF_SIZE);
		else
			keylen = 0;
	} else {
		printk(KERN_ERR "%scookie=NULL\n", prefix);
		keylen = 0;
	}
	spin_unlock(&object->fscache.lock);

	/* dump the key as a string of hex octets */
	if (keylen) {
		printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
		for (loop = 0; loop < keylen; loop++)
			printk("%02x", keybuf[loop]);
		printk("'\n");
	}
}
  72. /*
  73. * dump debugging info about a pair of objects
  74. */
  75. static noinline void cachefiles_printk_object(struct cachefiles_object *object,
  76. struct cachefiles_object *xobject)
  77. {
  78. u8 *keybuf;
  79. keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
  80. if (object)
  81. __cachefiles_printk_object(object, "", keybuf);
  82. if (xobject)
  83. __cachefiles_printk_object(xobject, "x", keybuf);
  84. kfree(keybuf);
  85. }
/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%*.*s'",
	       dentry->d_name.len, dentry->d_name.len, dentry->d_name.name);

	/* search the active-object tree, which is keyed on dentry pointer */
	write_lock(&cache->active_lock);

	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	write_unlock(&cache->active_lock);
	_leave(" [no owner]");
	return;

	/* found the active object that owns this dentry */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       fscache_object_states[object->fscache.state],
	       dentry);

	/* a live object should never have its backing file buried out from
	 * under it; nor should an object be marked buried twice */
	if (object->fscache.state < FSCACHE_OBJECT_DYING) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
		printk(KERN_ERR "CacheFiles: Error:"
		       " Object already preemptively buried\n");
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}
/*
 * record the fact that an object is now active
 * - returns 0 on success, or -ETIMEDOUT if the object must be requeued
 *   behind an old object occupying the same slot
 * - on success the object is inserted into cache->active_nodes, keyed on
 *   its dentry pointer
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	/* the object must not already be marked active */
	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		printk(KERN_ERR "CacheFiles: Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	/* find the insertion point in the tree, keyed on dentry pointer */
	dentry = object->dentry;
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	/* a colliding object that isn't yet dying indicates a bug */
	if (xobject->fscache.state < FSCACHE_OBJECT_DYING) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
		BUG();
	}
	/* pin the old object whilst we wait for it outside the lock */
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (slow_work_is_queued(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the slow-work facility wants the thread back to
		 * do other work */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;
			requeue = slow_work_sleep_till_thread_needed(
				&object->fscache.work, &timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		/* the old object is still active, but our thread is wanted
		 * elsewhere - requeue ourselves behind it */
		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* the 60s grace period expired without the old object going
		 * away - complain and requeue */
		if (timeout <= 0) {
			printk(KERN_ERR "\n");
			printk(KERN_ERR "CacheFiles: Error: Overlong"
			       " wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	/* the old object is gone; drop our pin and retry the insertion */
	cache->cache.ops->put_object(&xobject->fscache);
	goto try_again;

requeue:
	/* undo our ACTIVE claim and release the old object before asking the
	 * caller to requeue us */
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}
/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex (caller must hold dir->d_inode->i_mutex on
 *   entry; it is released on all paths)
 * - if @preemptive, the owning object (if any) is marked as preemptively
 *   buried
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive)
{
	struct dentry *grave, *trap;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%*.*s','%*.*s'",
	       dir->d_name.len, dir->d_name.len, dir->d_name.name,
	       rep->d_name.len, rep->d_name.len, rep->d_name.name);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!S_ISDIR(rep->d_inode->i_mode)) {
		_debug("unlink stale object");
		ret = vfs_unlink(dir->d_inode, rep);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep);

		mutex_unlock(&dir->d_inode->i_mutex);

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	mutex_unlock(&dir->d_inode->i_mutex);

try_again:
	/* first step is to make up a grave dentry in the graveyard;
	 * the name combines a timestamp with a counter for uniqueness */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	/* the grave name is already taken (counter wrapped or timestamp
	 * collision) - drop everything and make up a new name */
	if (grave->d_inode) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	ret = vfs_rename(dir->d_inode, rep, cache->graveyard->d_inode, grave);
	if (ret != 0 && ret != -ENOMEM)
		cachefiles_io_error(cache, "Rename failed with error %d", ret);

	/* NOTE(review): the buried mark is applied and 0 returned even when
	 * vfs_rename() failed - confirm this best-effort behaviour is
	 * intentional */
	if (preemptive)
		cachefiles_mark_object_buried(cache, rep);

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}
  336. /*
  337. * delete an object representation from the cache
  338. */
  339. int cachefiles_delete_object(struct cachefiles_cache *cache,
  340. struct cachefiles_object *object)
  341. {
  342. struct dentry *dir;
  343. int ret;
  344. _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);
  345. ASSERT(object->dentry);
  346. ASSERT(object->dentry->d_inode);
  347. ASSERT(object->dentry->d_parent);
  348. dir = dget_parent(object->dentry);
  349. mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
  350. if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
  351. /* object allocation for the same key preemptively deleted this
  352. * object's file so that it could create its own file */
  353. _debug("object preemptively buried");
  354. mutex_unlock(&dir->d_inode->i_mutex);
  355. ret = 0;
  356. } else {
  357. /* we need to check that our parent is _still_ our parent - it
  358. * may have been renamed */
  359. if (dir == object->dentry->d_parent) {
  360. ret = cachefiles_bury_object(cache, dir,
  361. object->dentry, false);
  362. } else {
  363. /* it got moved, presumably by cachefilesd culling it,
  364. * so it's no longer in the key path and we can ignore
  365. * it */
  366. mutex_unlock(&dir->d_inode->i_mutex);
  367. ret = 0;
  368. }
  369. }
  370. dput(dir);
  371. _leave(" = %d", ret);
  372. return ret;
  373. }
/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 * - @key is a sequence of NUL-terminated path elements ending in a double
 *   NUL; intermediate elements become subdirectories, the terminal element
 *   becomes the object's backing file or directory
 * - on success, object->dentry points at the backing dentry and the object
 *   has been marked active
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);

	ASSERT(parent->dentry);
	ASSERT(parent->dentry->d_inode);

	if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
		// TODO: convert file to dir
		_leave("looking up in none directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;	/* NULL key means this is the last element */

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next))
		goto lookup_error;

	_debug("next -> %p %s", next, next->d_inode ? "positive" : "negative");

	/* on the terminal element, record whether the object is new */
	if (!key)
		object->new = !next->d_inode;

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (!next->d_inode)
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			start = jiffies;
			ret = vfs_mkdir(dir->d_inode, next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode)) {
			kerror("inode %lu is not a directory",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			start = jiffies;
			ret = vfs_create(dir->d_inode, next, S_IFREG, NULL);
			cachefiles_hist(cachefiles_create_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("create -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode) &&
			   !S_ISREG(next->d_inode->i_mode)
			   ) {
			kerror("inode %lu is not a file or directory",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%*.*s'",
		       next->d_name.len, next->d_name.len, next->d_name.name);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, dir, next, true);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			/* retry the terminal lookup so a fresh file gets
			 * created in the stale one's place */
			_debug("redo lookup");
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	mutex_unlock(&dir->d_inode->i_mutex);
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		touch_atime(cache->mnt, next);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (S_ISREG(object->dentry->d_inode->i_mode)) {
			const struct address_space_operations *aops;

			/* the backing filesystem must support bmap */
			ret = -EPERM;
			aops = object->dentry->d_inode->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", object->dentry->d_inode->i_ino);
	return 0;

create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	/* undo cachefiles_mark_object_active() */
	_debug("check error %d", ret);
	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
	write_unlock(&cache->active_lock);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	/* the burier already dropped the directory mutex */
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}
/*
 * get a subdirectory, creating it if it doesn't exist
 * - returns the subdir dentry with a reference held, or an ERR_PTR
 * - the directory mutex is taken and released internally
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	mutex_lock(&dir->d_inode->i_mutex);

	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, subdir->d_inode ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (!subdir->d_inode) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		ret = vfs_mkdir(dir->d_inode, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

		ASSERT(subdir->d_inode);

		_debug("mkdir -> %p{%p{ino=%lu}}",
		       subdir,
		       subdir->d_inode,
		       subdir->d_inode->i_ino);
	}

	mutex_unlock(&dir->d_inode->i_mutex);

	/* we need to make sure the subdir is a directory */
	ASSERT(subdir->d_inode);

	if (!S_ISDIR(subdir->d_inode->i_mode)) {
		kerror("%s is not a directory", dirname);
		ret = -EIO;
		goto check_error;
	}

	/* the backing filesystem must supply all the inode ops the cache
	 * relies on (xattrs, lookup, create/remove, rename) */
	ret = -EPERM;
	if (!subdir->d_inode->i_op ||
	    !subdir->d_inode->i_op->setxattr ||
	    !subdir->d_inode->i_op->getxattr ||
	    !subdir->d_inode->i_op->lookup ||
	    !subdir->d_inode->i_op->mkdir ||
	    !subdir->d_inode->i_op->create ||
	    !subdir->d_inode->i_op->rename ||
	    !subdir->d_inode->i_op->rmdir ||
	    !subdir->d_inode->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", subdir->d_inode->i_ino);
	return subdir;

check_error:
	/* mutex already released by this point */
	dput(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mkdir_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(subdir);
	kerror("mkdir %s failed with error %d", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(subdir);
	kerror("Lookup %s failed with error %d", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	mutex_unlock(&dir->d_inode->i_mutex);
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
/*
 * find out if an object is in use or not
 * - if finds object and it's not in use:
 *   - returns a pointer to the object and a reference on it
 *   - returns with the directory locked (the caller must unlock)
 * - on any other outcome the directory is unlocked and an ERR_PTR returned
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
					      struct dentry *dir,
					      char *filename)
{
	struct cachefiles_object *object;
	struct rb_node *_n;
	struct dentry *victim;
	unsigned long start;
	int ret;

	//_enter(",%*.*s/,%s",
	//       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);

	/* look up the victim */
	mutex_lock_nested(&dir->d_inode->i_mutex, 1);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(victim))
		goto lookup_error;

	//_debug("victim -> %p %s",
	//       victim, victim->d_inode ? "positive" : "negative");

	/* if the object is no longer there then we probably retired the object
	 * at the netfs's request whilst the cull was in progress
	 */
	if (!victim->d_inode) {
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(victim);
		_leave(" = -ENOENT [absent]");
		return ERR_PTR(-ENOENT);
	}

	/* check to see if we're using this object; the active tree is keyed
	 * on dentry pointer */
	read_lock(&cache->active_lock);

	_n = cache->active_nodes.rb_node;

	while (_n) {
		object = rb_entry(_n, struct cachefiles_object, active_node);

		if (object->dentry > victim)
			_n = _n->rb_left;
		else if (object->dentry < victim)
			_n = _n->rb_right;
		else
			goto object_in_use;
	}

	read_unlock(&cache->active_lock);

	/* not in use: return with the dir still locked and a ref on victim */
	//_leave(" = %p", victim);
	return victim;

object_in_use:
	read_unlock(&cache->active_lock);
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(victim);
	//_leave(" = -EBUSY [in use]");
	return ERR_PTR(-EBUSY);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return ERR_PTR(-ESTALE);
	}

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		kerror("Internal error: %d", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 * - cachefiles_check_active() returns with the dir locked on success; the
 *   lock is subsequently dropped either here (error_unlock) or by
 *   cachefiles_bury_object()
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	int ret;

	_enter(",%*.*s/,%s",
	       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);

	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	_debug("victim -> %p %s",
	       victim, victim->d_inode ? "positive" : "negative");

	/* okay... the victim is not being used so we can cull it
	 * - start by marking it as stale
	 */
	_debug("victim is cullable");

	ret = cachefiles_remove_object_xattr(cache, victim);
	if (ret < 0)
		goto error_unlock;

	/* actually remove the victim (drops the dir mutex) */
	_debug("bury");

	ret = cachefiles_bury_object(cache, dir, victim, false);
	if (ret < 0)
		goto error;

	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	mutex_unlock(&dir->d_inode->i_mutex);
error:
	dput(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return -ESTALE;
	}

	if (ret != -ENOMEM) {
		kerror("Internal error: %d", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}
  772. /*
  773. * find out if an object is in use or not
  774. * - called only by cache manager daemon
  775. * - returns -EBUSY or 0 to indicate whether an object is in use or not
  776. */
  777. int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
  778. char *filename)
  779. {
  780. struct dentry *victim;
  781. //_enter(",%*.*s/,%s",
  782. // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
  783. victim = cachefiles_check_active(cache, dir, filename);
  784. if (IS_ERR(victim))
  785. return PTR_ERR(victim);
  786. mutex_unlock(&dir->d_inode->i_mutex);
  787. dput(victim);
  788. //_leave(" = 0");
  789. return 0;
  790. }