/*
 * include/linux/sunrpc/cache.h
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */
#ifndef _LINUX_SUNRPC_CACHE_H_
#define _LINUX_SUNRPC_CACHE_H_

#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
/*
 * Each cache requires:
 *  - A 'struct cache_detail' which contains information specific to the cache
 *    for common code to use.
 *  - An item structure that must contain a "struct cache_head"
 *    (see the example sketch below).
 *  - A lookup function defined using DefineCacheLookup.
 *  - A 'put' function that can release a cache item.  It will only
 *    be called after cache_put has succeeded, so there are guaranteed
 *    to be no references.
 *  - A function to calculate a hash of an item's key.
 *
 * as well as assorted code fragments (e.g. compare keys) and numbers
 * (e.g. hash size, goal_age, etc).
 *
 * Each cache must be registered so that it can be cleaned regularly.
 * When the cache is unregistered, it is flushed completely.
 *
 * Entries have a ref count and a 'hashed' flag which records the entry's
 * presence in the hash table.
 * We only expire entries when the refcount is zero.
 * Existence in the cache is counted in the refcount.
 */
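
/*
 * Illustrative sketch only (not part of this header): a minimal item
 * structure for a hypothetical cache called "mycache".  The struct name and
 * fields are assumptions for the example; the one hard requirement from the
 * list above is the embedded struct cache_head, and for the lookup macros
 * below it must be the first member:
 *
 *	struct mycache {
 *		struct cache_head	h;	-- common header, must be first
 *		int			key;	-- whatever identifies the item
 *		int			value;	-- the cached content
 *	};
 */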
/* Every cache item has a common header that is used
 * for expiring and refreshing entries.
 */
struct cache_head {
	struct cache_head	*next;
	time_t			expiry_time;	/* After this time, don't use the data */
	time_t			last_refresh;	/* If CACHE_PENDING, this is when the upcall
						 * was sent, else this is when the update was received
						 */
	atomic_t		refcnt;
	unsigned long		flags;
};
#define	CACHE_VALID	0	/* Entry contains valid data */
#define	CACHE_NEGATIVE	1	/* Negative entry - there is no match for the key */
#define	CACHE_PENDING	2	/* An upcall has been sent but no reply received yet */

#define	CACHE_NEW_EXPIRY 120	/* keep new things pending confirmation for 120 seconds */
struct cache_detail {
	int			hash_size;
	struct cache_head	**hash_table;
	rwlock_t		hash_lock;

	atomic_t		inuse;	/* active user-space update or lookup */

	char			*name;
	void			(*cache_put)(struct cache_head *,
					     struct cache_detail *);

	void			(*cache_request)(struct cache_detail *cd,
						 struct cache_head *h,
						 char **bpp, int *blen);
	int			(*cache_parse)(struct cache_detail *,
					       char *buf, int len);

	int			(*cache_show)(struct seq_file *m,
					      struct cache_detail *cd,
					      struct cache_head *h);

	/* fields below this comment are for internal use
	 * and should not be touched by cache owners
	 */
	time_t			flush_time;	/* flush all cache items with last_refresh
						 * earlier than this */
	struct list_head	others;
	time_t			nextcheck;
	int			entries;

	/* fields for communication over channel */
	struct list_head	queue;
	struct proc_dir_entry	*proc_ent;
	struct proc_dir_entry	*flush_ent, *channel_ent, *content_ent;

	atomic_t		readers;	/* how many times is /channel open */
	time_t			last_close;	/* if no readers, when did last close */
	time_t			last_warn;	/* when we last warned about no readers */
	void			(*warn_no_listener)(struct cache_detail *cd);
};
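
/*
 * Illustrative sketch only (continuing the hypothetical "mycache" example):
 * a cache owner typically fills in the public fields above with its own
 * table, name and callbacks, then registers the detail.  All mycache_*
 * names and MYCACHE_HASHSIZE are assumptions for the example:
 *
 *	#define MYCACHE_HASHSIZE 16
 *	static struct cache_head *mycache_table[MYCACHE_HASHSIZE];
 *	static struct cache_detail mycache_cache = {
 *		.hash_size	= MYCACHE_HASHSIZE,
 *		.hash_table	= mycache_table,
 *		.name		= "mycache",
 *		.cache_put	= mycache_put,
 *		.cache_request	= mycache_request,
 *		.cache_parse	= mycache_parse,
 *		.cache_show	= mycache_show,
 *	};
 *
 *	cache_register(&mycache_cache);		-- at module init
 *	cache_unregister(&mycache_cache);	-- at module exit
 */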
/* this must be embedded in any request structure that
 * identifies an object that will want a callback on
 * a cache fill
 */
struct cache_req {
	struct cache_deferred_req *(*defer)(struct cache_req *req);
};

/* this must be embedded in a deferred_request that is being
 * delayed awaiting cache-fill
 */
struct cache_deferred_req {
	struct list_head	hash;	/* on hash chain */
	struct list_head	recent;	/* on fifo */
	struct cache_head	*item;	/* cache item we wait on */
	time_t			recv_time;
	void			*owner;	/* we might need to discard all deferred requests
					 * owned by someone */
	void			(*revisit)(struct cache_deferred_req *req,
					   int too_many);
};
/*
 * just like a template in C++, this macro does cache lookup
 * for us.
 * The function is passed some sort of HANDLE from which a cache_detail
 * structure can be determined (via SETUP, DETAIL), a template
 * cache entry (type RTN*), and a "set" flag.  Using the HASHFN and the
 * TEST, the function will try to find a matching cache entry in the cache.
 * If "set" == 0 :
 *   If an entry is found, it is returned.
 *   If no entry is found, a new non-VALID entry is created.
 * If "set" == 1 and INPLACE == 0 :
 *   If no entry is found, a new one is inserted with data from "template".
 *   If a non-CACHE_VALID entry is found, it is updated from "template" using UPDATE.
 *   If a CACHE_VALID entry is found, a new entry is swapped in with data
 *    from "template".
 * If "set" == 1 and INPLACE == 1 :
 *   As above, except that if a CACHE_VALID entry is found, we UPDATE in place
 *    instead of swapping in a new entry.
 *
 * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not
 * run but instead CACHE_NEGATIVE is set in any new item.
 * In any case, the new entry is returned with a reference count.
 *
 *
 * RTN is a struct type for a cache entry.
 * MEMBER is the member of the cache entry which is the cache_head; it must be first.
 * FNAME is the name for the function.
 * ARGS are arguments to the function and must contain RTN *item, int set.  May
 *   also contain something to be used by SETUP or DETAIL to find the cache_detail.
 * SETUP locates the cache_detail and makes it available as...
 * DETAIL identifies the cache_detail, possibly set up by SETUP.
 * HASHFN returns a hash value of the cache entry "item".
 * TEST tests if "tmp" matches "item".
 * INIT copies key information from "item" to "new".
 * UPDATE copies content information from "item" to "tmp".
 * INPLACE is true if updates can happen in place rather than allocating a new structure.
 *
 * WARNING: any substantial changes to this must be reflected in
 *   net/sunrpc/svcauth.c(auth_domain_lookup)
 *   which is a similar routine that is open-coded.
 */
#define DefineCacheLookup(RTN, MEMBER, FNAME, ARGS, SETUP, DETAIL, HASHFN, TEST, INIT, UPDATE, INPLACE) \
RTN *FNAME ARGS \
{ \
	RTN *tmp, *new = NULL; \
	struct cache_head **hp, **head; \
	SETUP; \
	head = &(DETAIL)->hash_table[HASHFN]; \
 retry: \
	if (set || new) write_lock(&(DETAIL)->hash_lock); \
	else read_lock(&(DETAIL)->hash_lock); \
	for (hp = head; *hp != NULL; hp = &tmp->MEMBER.next) { \
		tmp = container_of(*hp, RTN, MEMBER); \
		if (TEST) { /* found a match */ \
 \
			if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
				break; \
 \
			if (new) \
				{INIT;} \
			if (set) { \
				if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags)) \
				{ /* need to swap in new */ \
					RTN *t2; \
 \
					new->MEMBER.next = tmp->MEMBER.next; \
					*hp = &new->MEMBER; \
					tmp->MEMBER.next = NULL; \
					t2 = tmp; tmp = new; new = t2; \
				} \
				if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
					set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
				else { \
					UPDATE; \
					clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
				} \
			} \
			cache_get(&tmp->MEMBER); \
			if (set || new) write_unlock(&(DETAIL)->hash_lock); \
			else read_unlock(&(DETAIL)->hash_lock); \
			if (set) \
				cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \
			if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
			if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \
			return tmp; \
		} \
	} \
	/* Didn't find anything */ \
	if (new) { \
		INIT; \
		new->MEMBER.next = *head; \
		*head = &new->MEMBER; \
		(DETAIL)->entries++; \
		cache_get(&new->MEMBER); \
		if (set) { \
			tmp = new; \
			if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \
				set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \
			else {UPDATE;} \
		} \
	} \
	if (set || new) write_unlock(&(DETAIL)->hash_lock); \
	else read_unlock(&(DETAIL)->hash_lock); \
	if (new && set) \
		cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time); \
	if (new) \
		return new; \
	new = kmalloc(sizeof(*new), GFP_KERNEL); \
	if (new) { \
		cache_init(&new->MEMBER); \
		goto retry; \
	} \
	return NULL; \
}
#define DefineSimpleCacheLookup(STRUCT, INPLACE) \
	DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), \
			  /* no setup */, \
			  &STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp), \
			  STRUCT##_init(new, item), STRUCT##_update(tmp, item), INPLACE)
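
/*
 * Illustrative sketch only (continuing the hypothetical "mycache" example):
 * DefineSimpleCacheLookup expects the cache to supply mycache_cache (above)
 * plus mycache_hash(), mycache_match(), mycache_init() and mycache_update(),
 * and then generates a mycache_lookup(item, set) function:
 *
 *	static inline int mycache_hash(struct mycache *item)
 *	{ return item->key & (MYCACHE_HASHSIZE - 1); }
 *	static inline int mycache_match(struct mycache *item, struct mycache *tmp)
 *	{ return item->key == tmp->key; }
 *	static inline void mycache_init(struct mycache *new, struct mycache *item)
 *	{ new->key = item->key; }
 *	static inline void mycache_update(struct mycache *tmp, struct mycache *item)
 *	{ tmp->value = item->value; }
 *
 *	static DefineSimpleCacheLookup(mycache, 0)	-- 0: do not update in place
 */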
#define cache_for_each(pos, detail, index, member) \
	for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}); \
	     ({if (index == 0) read_unlock(&(detail)->hash_lock); index--;}); \
	    ) \
		for (pos = container_of((detail)->hash_table[index], typeof(*pos), member); \
		     &pos->member; \
		     pos = container_of(pos->member.next, typeof(*pos), member))
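
/*
 * Illustrative sketch only: walking every entry of the hypothetical
 * "mycache" with cache_for_each (the hash_lock is taken and released by
 * the macro itself):
 *
 *	struct mycache *pos;
 *	int index;
 *
 *	cache_for_each(pos, &mycache_cache, index, h) {
 *		-- inspect pos->key, pos->value, pos->h.expiry_time, ...
 *	}
 */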
extern void cache_clean_deferred(void *owner);

/* take an extra reference on a cache entry */
static inline struct cache_head *cache_get(struct cache_head *h)
{
	atomic_inc(&h->refcnt);
	return h;
}

/* drop a reference; returns true when the last reference is gone.
 * If at most the hash chain's reference will remain, pull nextcheck
 * forward so the next cleaning pass considers this entry's expiry.
 */
static inline int cache_put(struct cache_head *h, struct cache_detail *cd)
{
	if (atomic_read(&h->refcnt) <= 2 &&
	    h->expiry_time < cd->nextcheck)
		cd->nextcheck = h->expiry_time;
	return atomic_dec_and_test(&h->refcnt);
}
extern void cache_init(struct cache_head *h);
extern void cache_fresh(struct cache_detail *detail,
			struct cache_head *head, time_t expiry);
extern int cache_check(struct cache_detail *detail,
		       struct cache_head *h, struct cache_req *rqstp);
extern void cache_flush(void);
extern void cache_purge(struct cache_detail *detail);
#define NEVER (0x7FFFFFFF)
extern void cache_register(struct cache_detail *cd);
extern int cache_unregister(struct cache_detail *cd);

extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);
static inline int get_int(char **bpp, int *anint)
{
	char buf[50];
	char *ep;
	int rv;
	int len = qword_get(bpp, buf, 50);
	if (len < 0) return -EINVAL;
	if (len == 0) return -ENOENT;
	rv = simple_strtol(buf, &ep, 0);
	if (*ep) return -EINVAL;
	*anint = rv;
	return 0;
}
static inline time_t get_expiry(char **bpp)
{
	int rv;
	if (get_int(bpp, &rv))
		return 0;
	if (rv < 0)
		return 0;
	return rv;
}
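
/*
 * Illustrative sketch only (continuing the hypothetical "mycache" example):
 * a cache_parse routine might use the helpers above to decode one line
 * written to the cache's /proc channel.  Error handling is reduced to a
 * minimum, and mycache_lookup/mycache_put are the hypothetical helpers
 * from the earlier sketches:
 *
 *	static int mycache_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		struct mycache item, *found;
 *
 *		if (get_int(&mesg, &item.key))
 *			return -EINVAL;
 *		item.h.expiry_time = get_expiry(&mesg);	-- 0 means parse error
 *		if (get_int(&mesg, &item.value))
 *			return -EINVAL;
 *		item.h.flags = 0;			-- not a negative entry
 *		found = mycache_lookup(&item, 1);	-- set==1: insert or update
 *		if (found)
 *			mycache_put(&found->h, &mycache_cache);
 *		return 0;
 *	}
 */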
#endif /* _LINUX_SUNRPC_CACHE_H_ */