/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = seconds_since_boot();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
	return (h->expiry_time < seconds_since_boot()) ||
		(detail->flush_time > h->last_refresh);
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL, *freeme = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				*hp = tmp->next;
				tmp->next = NULL;
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
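
/*
 * Example usage (an illustrative sketch, not code from any particular
 * cache): users typically embed struct cache_head in their own entry
 * type and wrap the lookup.  'struct my_ent', 'my_cache' and 'my_hash'
 * below are hypothetical names.
 *
 *	struct my_ent {
 *		struct cache_head h;
 *		struct in_addr addr;
 *	};
 *
 *	static struct my_ent *my_lookup(struct my_ent *key)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(&my_cache, &key->h,
 *					 my_hash(&key->addr));
 *		return ch ? container_of(ch, struct my_ent, h) : NULL;
 *	}
 */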

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = seconds_since_boot();
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);

	head = &detail->hash_table[hash];
	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
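
/*
 * Example usage (illustrative, reusing the hypothetical names from the
 * sketch above): after parsing a downcall into a temporary entry 'new',
 * a cache would typically replace the queried entry with
 *
 *	ch = sunrpc_cache_update(&my_cache, &new->h, &old->h,
 *				 my_hash(&old->addr));
 *
 * sunrpc_cache_update() drops the caller's reference on 'old' and, on
 * success, returns a referenced entry carrying the new content.
 */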

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
		rv = -ENOENT;
	}
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued, or
 *            upcall completed but item is still invalid (implying that
 *            the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
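
/*
 * Example usage (an illustrative sketch): a server-side caller passes
 * the request handle so the request can be deferred while the upcall
 * is pending.  'my_cache' and 'ent' are hypothetical; rq_chandle is
 * the cache_req embedded in struct svc_rqst.
 *
 *	switch (cache_check(&my_cache, &ent->h, &rqstp->rq_chandle)) {
 *	case 0:			// entry valid; cache_put() when done
 *		break;
 *	case -EAGAIN:		// upcall pending, request deferred
 *	case -ENOENT:		// negative entry
 *	case -ETIMEDOUT:	// upcall failed, no usable entry
 *	default:
 *		// the reference has already been dropped
 *		break;
 *	}
 */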

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
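
/*
 * Worked example (informal): if a scan starting at time T sets
 * nextcheck to T+30*60 and then walks entries expiring at T+5 and
 * T+200, nextcheck ends up as T+6, so the table becomes eligible for
 * rescanning as soon as the earliest entry can actually expire rather
 * than only after the full 30 minutes.
 */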

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */
		cp = &current_detail->hash_table[current_index];
		for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
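
/*
 * Illustrative sketch: a deferrable request provides its deferred form
 * by embedding the handle, much as the on-stack thread_deferred_req
 * below does.  'struct my_deferred_req' is a hypothetical example.
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req handle;
 *		// enough state to retry the request later
 *	};
 *
 * The request's ->defer() method returns &my->handle with ->revisit
 * set to a function that requeues the request, or drops it when
 * called with 'too_many' set.
 */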

#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
#define DFR_MAX		300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (net_random() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
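
/*
 * Illustrative exchange (only a sketch; the exact line format is
 * defined by each cache's cache_request/cache_parse methods):
 *
 *	daemon:  read(channel)  -> "<key fields ...>\n"
 *	daemon:  ... resolves the key ...
 *	daemon:  write(channel) <- "<key fields ...> <expiry> <content>\n"
 */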

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh
 * with slosh, or hexified with a leading \x.
 * Record is terminated with newline.
 */
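
/*
 * For example, qword_add() copies "path" through unchanged (followed by
 * a space), but "a b\n" becomes "a\040b\012 ": space (040) and newline
 * (012) are quoted as \ooo octal.  qword_addhex() would render the two
 * bytes 0xde 0xad as "\xdead ".
 */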
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags))
		list_add_tail(&crq->q.list, &detail->queue);
	else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
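
/*
 * For example (purely illustrative), a reply line written by a
 * user-space daemon might look like
 *
 *	\x6d796b6579 2147483647 myvalue
 *
 * i.e. a hex-quoted key, an expiry time and cache-specific content;
 * how the fields are interpreted is entirely up to the receiving
 * cache's ->cache_parse method.
 */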
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);

/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */
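
/*
 * For example, reading such a 'content' file typically produces
 * something like (illustrative):
 *
 *	#<header line from ->cache_show(m, cd, NULL)>
 *	<one line per valid cache entry>
 *	# <invalid or expired entries are commented out>
 *
 * With CACHE debugging enabled, each entry is additionally preceded
 * by a "# expiry=... refcnt=... flags=..." line.
 */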

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	unsigned long p = *ppos;
	size_t len;

	snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *bp, *ep;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	bp = tbuf;
	cd->flush_time = get_expiry(&bp);
	cd->nextcheck = seconds_since_boot();
	cache_flush();

	*ppos += count;
	return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct sunrpc_net *sn;

	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(cd->name, sn->proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd, net);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd, net);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else
		ret = PTR_ERR(dir);
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);