glock.c

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}

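/*
 * For example: a request for LM_ST_SHARED is satisfied by a glock
 * already held in LM_ST_EXCLUSIVE (unless GL_EXACT was passed), and a
 * request carrying LM_FLAG_ANY is satisfied by any state other than
 * LM_ST_UNLOCKED.
 */
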
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(uint64_t), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

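/*
 * Both the lock number and the lock type feed the hash, so glocks that
 * share a number but differ in type (e.g. an inode glock and the
 * corresponding iopen glock) will usually land in different buckets.
 */
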
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
	int rv = 0;

	write_lock(&bucket->hb_lock);
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(&bucket->hb_lock);
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(&bucket->hb_lock);
out:
	return rv;
}

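/*
 * Every gfs2_glock_hold() must be balanced by a gfs2_glock_put(); the
 * final put removes the glock from its hash bucket under the bucket's
 * write lock and frees it via glock_free().
 */
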
/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;
	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);
	return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @sdp: the filesystem the glock must belong to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &bucket->hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
	struct gfs2_glock *gl;

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, sdp, name);
	read_unlock(&bucket->hb_lock);

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name;
	struct gfs2_glock *gl, *tmp;
	struct gfs2_gl_hash_bucket *bucket;
	int error;

	name.ln_number = number;
	name.ln_type = glops->go_type;
	bucket = &sdp->sd_gl_hash[gl_hash(&name)];

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, sdp, &name);
	read_unlock(&bucket->hb_lock);

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	kref_init(&gl->gl_ref);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_bucket = bucket;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(&bucket->hb_lock);
	tmp = search_bucket(bucket, sdp, &name);
	if (tmp) {
		write_unlock(&bucket->hb_lock);
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &bucket->hb_list);
		write_unlock(&bucket->hb_lock);
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

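/*
 * Note on the creation path above: the bucket is searched a second
 * time under the write lock before the insert.  If another process
 * created the same glock between the unlocked lookup and the insert,
 * the freshly allocated glock is discarded via glock_free() and the
 * winner's glock is returned instead.
 */
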
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: memory allocation flags
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);

	return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}

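/*
 * Queue priority note: run_queue() drains gl_waiters1 (glmutex
 * requests) first, then gl_waiters2 (demote/greedy requests, unless
 * GLF_SKIP_WAITERS2 is set), and only then gl_waiters3 (promote
 * requests), stopping as soon as a request reports the queue blocked.
 */
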
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		complete(&gh.gh_wait);
	}
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		acquired = 0;
	else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

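/*
 * Usage sketch (illustrative only; the same pattern appears in
 * gfs2_reclaim_glock() below): opportunistic work is attempted only
 * when the glmutex can be taken without blocking:
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... inspect or demote the glock ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */
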
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
		if (!new_gh)
			return;
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}

void gfs2_glock_inode_squish(struct inode *inode)
{
	struct gfs2_holder gh;
	struct gfs2_glock *gl = GFS2_I(inode)->i_gl;

	gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
	set_bit(HIF_DEMOTE, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

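/*
 * A glock pins itself with an extra reference for as long as it is
 * held in any locked state: state_change() takes that reference on the
 * unlocked-to-locked transition and drops it on the way back down.
 */
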
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		gl->gl_stamp = jiffies;

	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);

	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED)
			gh->gh_error = 0;
		else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED);

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				(gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
						   gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type,
		       existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
		dump_glock(gl);

	return error;
}

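/*
 * Usage sketch (hypothetical caller, for illustration): acquisition
 * and release are normally paired through the gfs2_glock_nq_init() and
 * gfs2_glock_dq_uninit() helpers:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (!error) {
 *		... access the object protected by the glock ...
 *		gfs2_glock_dq_uninit(&gh);
 *	}
 */
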
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}

static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}

/**
 * gfs2_glock_be_greedy - make a glock greedy for a while
 * @gl: the glock
 * @time: the delay, in jiffies, before greedy status is dropped
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}

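/*
 * Sorting holders with glock_compare() before acquiring them (see
 * nq_m_sync() below) imposes a global order on lock acquisition, which
 * is what makes taking multiple glocks deadlock-free.
 */
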
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	/* Note: e does double duty: it collects each holder's wait status
	   below, and the same (pointer-sized) buffer is reused as the
	   pointer array that nq_m_sync() sorts. */
	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 *
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
	if (state != LM_ST_UNLOCKED)
		return;
	/* FIXME: remove this? */
}


/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies, gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}
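
/*
 * Hook sketch (assumed; example_go_demote_ok is hypothetical, patterned on
 * what an inode-type go_demote_ok might check): a glock type can veto
 * demotion while it still has cached state worth keeping, e.g. pages in
 * the glock's address space:
 *
 *	static int example_go_demote_ok(struct gfs2_glock *gl)
 *	{
 *		// keep the lock while demoting would throw away cached pages
 *		return !(gl->gl_aspace &&
 *			 gl->gl_aspace->i_mapping->nrpages);
 *	}
 */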

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
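
/*
 * Consumer sketch (assumed shape of the gfs2_glockd() daemon, which lives
 * outside this file): the daemon drains the reclaim list and then sleeps
 * on sd_reclaim_wq until gfs2_glock_schedule_for_reclaim() wakes it:
 *
 *	while (!kthread_should_stop()) {
 *		while (atomic_read(&sdp->sd_reclaim_count))
 *			gfs2_reclaim_glock(sdp);
 *		wait_event_interruptible(sdp->sd_reclaim_wq,
 *					 atomic_read(&sdp->sd_reclaim_count) ||
 *					 kthread_should_stop());
 *	}
 */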

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  struct gfs2_gl_hash_bucket *bucket)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(&bucket->hb_lock);
	list_add(&plug.gl_list, &bucket->hb_list);
	write_unlock(&bucket->hb_lock);

	for (;;) {
		write_lock(&bucket->hb_lock);

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &bucket->hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&bucket->hb_list);
				write_unlock(&bucket->hb_lock);
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(&bucket->hb_lock);

		examiner(gl);
	}
}
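
/*
 * Examiner contract sketch (assumed, matching how scan_glock and
 * clear_glock below behave): examine_bucket() takes a reference on each
 * glock before calling the examiner, so every examiner must end by
 * dropping it:
 *
 *	static void example_examiner(struct gfs2_glock *gl)
 *	{
 *		// inspect or act on gl here
 *		gfs2_glock_put(gl);	// mandatory: drop the hold
 *	}
 */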

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops)
		goto out;

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
out:
	gfs2_glock_put(gl);
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
		cond_resched();
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when the inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x]))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		invalidate_inodes(sdp->sd_vfs);
		msleep(10);
	}
}
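
/*
 * Usage sketch (assumed callers, not from this file; WAIT is assumed to be
 * the counterpart of the NO_WAIT flag used in gfs2_glock_cb() above):
 * unmount wants the table fully drained, while the DROPLOCKS callback
 * sheds locks opportunistically:
 *
 *	gfs2_gl_hash_clear(sdp, WAIT);		// unmount path
 *	gfs2_gl_hash_clear(sdp, NO_WAIT);	// lock manager pressure
 */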

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO " %s\n", str);
	printk(KERN_INFO " owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO " gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO " gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO " error = %d\n", gh->gh_error);
	printk(KERN_INFO " gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip);

	error = 0;
	return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO " Inode:\n");
	printk(KERN_INFO " num = %llu %llu\n",
	       (unsigned long long)ip->i_num.no_formal_ino,
	       (unsigned long long)ip->i_num.no_addr);
	printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO " i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");

	error = 0;
	return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n",
	       gl,
	       gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO " gl_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
	/* gl_owner may be NULL when no process currently owns the glock */
	printk(KERN_INFO " gl_owner = %s\n",
	       (gl->gl_owner) ? gl->gl_owner->comm : "none");
	print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip);
	printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO " object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO " le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO " reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n",
		       gl->gl_aspace,
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO " aspace = no\n");
	printk(KERN_INFO " ail = %d\n", atomic_read(&gl->gl_ail_count));

	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}

	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO " Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock in the hash table to the console.
 */

int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_gl_hash_bucket *bucket;
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		bucket = &sdp->sd_gl_hash[x];

		read_lock(&bucket->hb_lock);

		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(&bucket->hb_lock);

		if (error)
			break;
	}

	return error;
}