/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned int requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
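
/*
 * Illustrative sketch (added commentary, not from the original source):
 * how the relaxations above play out for a few concrete combinations.
 *
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0);            -> 1
 *              (a held EX lock satisfies an SH request)
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT);     -> 0
 *              (GL_EXACT demands an exact state match)
 *      relaxed_state_ok(LM_ST_SHARED, LM_ST_DEFERRED, LM_FLAG_ANY);   -> 1
 *              (LM_FLAG_ANY accepts any state but unlocked)
 *      relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY);   -> 0
 *              (nothing is held, so nothing can satisfy the request)
 */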

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed, 0 otherwise
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                BUG_ON(spin_is_locked(&gl->gl_spin));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;
        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);
        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                          struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;

        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops ||
            glops == &gfs2_meta_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}
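
/*
 * Illustrative sketch (added commentary, not from the original source):
 * the typical lookup-or-create pattern.  gfs2_glock_get() returns the
 * glock with a reference held, which the caller must drop.  CREATE,
 * NO_CREATE and the glops tables are defined elsewhere in this tree;
 * error handling is abbreviated.
 *
 *      struct gfs2_glock *gl;
 *      int error;
 *
 *      error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *      if (!error) {
 *              ... use gl ...
 *              gfs2_glock_put(gl);
 *      }
 */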

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}
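
/*
 * Illustrative sketch (added commentary, not from the original source):
 * gfs2_holder_init() and gfs2_holder_uninit() bracket a holder's life,
 * usually on the caller's stack; each init takes a glock reference that
 * the matching uninit drops.
 *
 *      struct gfs2_holder gh;
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *      ... queue/dequeue the holder with gfs2_glock_nq()/gfs2_glock_dq() ...
 *      gfs2_holder_uninit(&gh);
 */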

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags (callers pass __GFP_NOFAIL when the
 *      allocation must not fail)
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
                                           unsigned int state,
                                           int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);

        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}
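
/*
 * Note on ordering (added commentary): run_queue() drains the three
 * waiter lists in strict priority order -- gl_waiters1 (glmutex
 * requests) first, then gl_waiters2 (demote/greedy requests, unless
 * GLF_SKIP_WAITERS2 is set), then gl_waiters3 (promote requests) --
 * and stops as soon as GLF_LOCK is held or a request reports the
 * queue blocked.
 */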

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                complete(&gh.gh_wait);
        }
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}
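
/*
 * Illustrative sketch (added commentary, not from the original source):
 * the glmutex calls above form a simple critical section around glock
 * state protected by GLF_LOCK, e.g.
 *
 *      gfs2_glmutex_lock(gl);
 *      ... inspect or modify fields protected by GLF_LOCK ...
 *      gfs2_glmutex_unlock(gl);
 *
 * gfs2_glmutex_trylock() is the non-blocking variant; see its use in
 * gfs2_try_toss_inode() and gfs2_reclaim_glock() below.
 */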

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
                                         GFP_KERNEL | __GFP_NOFAIL);
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /* Deal with each possible exit condition */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
                dump_glock(gl);

        return error;
}
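
/*
 * Illustrative sketch (added commentary, not from the original source):
 * synchronous acquire/release of a glock through a stack-allocated
 * holder.
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_EXCLUSIVE, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (!error) {
 *              ... glock held ...
 *              gfs2_glock_dq(&gh);
 *      }
 *      gfs2_holder_uninit(&gh);
 *
 * (gfs2_glock_nq_init() wraps the init+nq pair; see its use in
 * gfs2_glock_nq_num() below.)
 */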

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}
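
/*
 * Illustrative sketch (added commentary, not from the original source):
 * the asynchronous request pattern that gfs2_glock_poll() and
 * gfs2_glock_wait() support.
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);     /+ never fails for GL_ASYNC requests +/
 *      ... do other work while the lock module operates ...
 *      if (gfs2_glock_poll(&gh))
 *              error = gfs2_glock_wait(&gh);
 *
 * (The inner comment delimiters are written as +/ to keep this block a
 * single C comment.)
 */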

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
                                int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

static void greedy_work(void *data)
{
        struct greedy *gr = data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy - defer demote requests against a glock for a while
 * @gl: the glock
 * @time: the number of jiffies to hold off demotes before go_greedy is called
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time ||
            gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}
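
/*
 * Note on ordering (added commentary): the comparator above ranks
 * holders primarily by lock number and, for equal numbers, places
 * exclusive and locally-exclusive requests ahead of shared ones, e.g.
 * an LM_ST_EXCLUSIVE holder sorts before an LM_ST_SHARED holder on the
 * same lock.  This gives nq_m_sync() a single global acquisition order,
 * which is what makes the multi-lock path below deadlock-free.
 */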

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, sorted into acquisition order
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        /* e holds one error code per holder, and is also reused below as
           the scratch array of holder pointers for nq_m_sync(), so the
           elements are allocated pointer-sized. */
        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);

        return error;
}
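
/*
 * Illustrative sketch (added commentary, not from the original source):
 * acquiring and releasing several glocks at once with an array of
 * holders.
 *
 *      struct gfs2_holder ghs[2];
 *      int error;
 *
 *      gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *      gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *      error = gfs2_glock_nq_m(2, ghs);
 *      if (!error)
 *              gfs2_glock_dq_m(2, ghs);
 *      gfs2_holder_uninit(&ghs[0]);
 *      gfs2_holder_uninit(&ghs[1]);
 */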

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
                             struct gfs2_glock_operations *glops,
                             unsigned int state, int flags)
{
        struct gfs2_glock *gl;
        int error;

        if (atomic_read(&sdp->sd_reclaim_count) <
            gfs2_tune_get(sdp, gt_reclaim_limit)) {
                error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
                if (!error) {
                        gfs2_glock_prefetch(gl, state, flags);
                        gfs2_glock_put(gl);
                }
        }
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
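
/*
 * Illustrative sketch (added commentary, not from the original source):
 * LVB attachment is reference counted, so hold/unhold calls must
 * balance.
 *
 *      int error = gfs2_lvb_hold(gl);
 *      if (!error) {
 *              ... read or write the buffer at gl->gl_lvb ...
 *              gfs2_lvb_unhold(gl);
 *      }
 */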

#if 0
void gfs2_lvb_sync(struct gfs2_glock *gl)
{
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
        if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
                gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

        gfs2_glmutex_unlock(gl);
}
#endif /* 0 */

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, state);
        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);

                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}
/**
 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 * @sdp: the filesystem
 * @inum: the inode number
 *
 */

void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
{
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;

	error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
			       NO_CREATE, &gl);
	if (error || !gl)
		return;

	if (!gfs2_glmutex_trylock(gl))
		goto out;

	ip = gl->gl_object;
	if (!ip)
		goto out_unlock;

	if (atomic_read(&ip->i_count))
		goto out_unlock;

	gfs2_inode_destroy(ip, 1);

out_unlock:
	gfs2_glmutex_unlock(gl);

out:
	gfs2_glock_put(gl);
}

/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 *
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
	struct gfs2_glock *i_gl;

	if (state != LM_ST_UNLOCKED)
		return;

	spin_lock(&io_gl->gl_spin);
	i_gl = io_gl->gl_object;
	if (i_gl) {
		gfs2_glock_hold(i_gl);
		spin_unlock(&io_gl->gl_spin);
	} else {
		spin_unlock(&io_gl->gl_spin);
		return;
	}

	if (gfs2_glmutex_trylock(i_gl)) {
		struct gfs2_inode *ip = i_gl->gl_object;
		if (ip) {
			gfs2_try_toss_vnode(ip);
			gfs2_glmutex_unlock(i_gl);
			gfs2_glock_schedule_for_reclaim(i_gl);
			goto out;
		}
		gfs2_glmutex_unlock(i_gl);
	}

out:
	gfs2_glock_put(i_gl);
}

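/*
 * Note the ordering above: the extra reference on i_gl is taken while
 * io_gl->gl_spin is still held, so the inode glock cannot disappear
 * between dropping the spinlock and attempting the glmutex.
 */
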
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies,
				       gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

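/*
 * Worked example for the prefetch case (values assumed, not defaults):
 * with gt_prefetch_secs tuned to 10 and HZ at 1000, a prefetched glock
 * only becomes eligible for demotion once 10 * 1000 = 10000 jiffies
 * (10 seconds) have passed since gl_stamp was last updated.
 */
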
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

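/*
 * The reference taken here travels with the list entry: it is dropped
 * by the final gfs2_glock_put() in gfs2_reclaim_glock(), or by the
 * asserted put in clear_glock() when the entry is pulled off the list
 * early.
 */
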
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from the gfs2_glockd() reclaim daemon, or when we are promoting
 * a different glock and notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip, 1);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  struct gfs2_gl_hash_bucket *bucket)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(&bucket->hb_lock);
	list_add(&plug.gl_list, &bucket->hb_list);
	write_unlock(&bucket->hb_lock);

	for (;;) {
		write_lock(&bucket->hb_lock);

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &bucket->hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&bucket->hb_list);
				write_unlock(&bucket->hb_lock);
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(&bucket->hb_lock);

		examiner(gl);
	}
}

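/*
 * How the plug walk above works: "plug" is a dummy list entry (only its
 * gl_list and gl_flags fields are ever touched) that marks our position
 * in the bucket.  That lets the bucket lock be dropped while examiner()
 * runs and retaken afterwards without losing our place, even if other
 * glocks are added or removed in the meantime.  GLF_PLUG is how
 * concurrent walkers recognise and skip over each other's plugs.
 */
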
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				goto out_schedule;
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
		cond_resched();
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip, 1);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

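/*
 * clear_glock() differs from scan_glock() in two ways: it pulls the
 * glock off the reclaim list itself instead of queueing it, and it
 * demotes any unheld glock unconditionally rather than consulting
 * demote_ok(), since the caller wants the hash table emptied.
 */
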
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when the inter-node lock
 * manager requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp,
					   &sdp->sd_gl_hash[x]))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		/* invalidate_inodes() requires that the sb inodes list
		   not change, but an async completion callback for an
		   unlock can occur which does glock_put() which
		   can call iput() which will change the sb inodes list.
		   invalidate_inodes_mutex prevents glock_put()'s during
		   an invalidate_inodes() */

		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
		invalidate_inodes(sdp->sd_vfs);
		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
		msleep(10);
	}
}

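/*
 * With @wait set, the loop above keeps making passes until a full pass
 * finds every bucket empty; between passes it sleeps 10ms, and once
 * gt_stall_secs have elapsed without finishing it dumps the lock state
 * to aid debugging before trying again.
 */
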
/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	printk(KERN_INFO "    num = %llu %llu\n",
	       (unsigned long long)ip->i_num.no_formal_ino,
	       (unsigned long long)ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

	error = 0;

	return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock (%u, %llu)\n", gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
	print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO "  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO "  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO "  aspace = %lu\n",
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO "  aspace = no\n");
	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));

	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}

	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO "  Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_gl_hash_bucket *bucket;
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		bucket = &sdp->sd_gl_hash[x];

		read_lock(&bucket->hb_lock);

		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(&bucket->hb_lock);

		if (error)
			break;
	}

	return error;
}