/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
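
/*
 * Illustrative note (not in the original source): a few concrete outcomes
 * of the checks above, for example:
 *
 *   relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)           -> 1
 *   relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)    -> 0
 *   relaxed_state_ok(LM_ST_SHARED, LM_ST_DEFERRED, LM_FLAG_ANY)  -> 1
 *   relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY)  -> 0
 */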
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name (contains the glock number and type)
 *
 * Returns: The number of the corresponding hash bucket
 */
static unsigned int gl_hash(struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(uint64_t), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
	int rv = 0;

	mutex_lock(&sdp->sd_invalidate_inodes_mutex);

	write_lock(&bucket->hb_lock);
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(&bucket->hb_lock);
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(&bucket->hb_lock);
out:
	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
	return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;
	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);
	return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
					struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &bucket->hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
					  struct lm_lockname *name)
{
	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
	struct gfs2_glock *gl;

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, name);
	read_unlock(&bucket->hb_lock);

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
		   struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name;
	struct gfs2_glock *gl, *tmp;
	struct gfs2_gl_hash_bucket *bucket;
	int error;

	name.ln_number = number;
	name.ln_type = glops->go_type;
	bucket = &sdp->sd_gl_hash[gl_hash(&name)];

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, &name);
	read_unlock(&bucket->hb_lock);

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(gl, 0, sizeof(struct gfs2_glock));

	INIT_LIST_HEAD(&gl->gl_list);
	gl->gl_name = name;
	kref_init(&gl->gl_ref);

	spin_lock_init(&gl->gl_spin);

	gl->gl_state = LM_ST_UNLOCKED;
	INIT_LIST_HEAD(&gl->gl_holders);
	INIT_LIST_HEAD(&gl->gl_waiters1);
	INIT_LIST_HEAD(&gl->gl_waiters2);
	INIT_LIST_HEAD(&gl->gl_waiters3);

	gl->gl_ops = glops;

	gl->gl_bucket = bucket;
	INIT_LIST_HEAD(&gl->gl_reclaim);

	gl->gl_sbd = sdp;

	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
	INIT_LIST_HEAD(&gl->gl_ail_list);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops ||
	    glops == &gfs2_rgrp_glops ||
	    glops == &gfs2_meta_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(&bucket->hb_lock);
	tmp = search_bucket(bucket, &name);
	if (tmp) {
		write_unlock(&bucket->hb_lock);
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &bucket->hb_list);
		write_unlock(&bucket->hb_lock);
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);

fail:
	kmem_cache_free(gfs2_glock_cachep, gl);

	return error;
}
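
/*
 * Illustrative sketch, not part of the original file: the typical pattern
 * for looking up (or creating) an inode glock by block number and dropping
 * the reference again.  The function name and the "no_addr" parameter are
 * hypothetical; gfs2_glock_get()/gfs2_glock_put() above are the real calls.
 */
#if 0
static int example_find_inode_glock(struct gfs2_sbd *sdp, uint64_t no_addr)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
	if (error)
		return error;

	/* ... use gl here (e.g. enqueue a holder on it) ... */

	gfs2_glock_put(gl);	/* drop the reference taken by gfs2_glock_get() */
	return 0;
}
#endif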
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state,
					   gh->gh_flags);

			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	else
		complete(&gh.gh_wait);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		acquired = 0;
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);
		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
					 GFP_KERNEL | __GFP_NOFAIL);
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		gl->gl_stamp = jiffies;

	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);

	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED)
			gh->gh_error = 0;
		else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
			       lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh &&
		    !(gl->gl_req_gh &&
		      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
						   gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_SYNC)
		set_bit(GLF_SYNC, &gl->gl_flags);

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		if (test_bit(GLF_SYNC, &gl->gl_flags)) {
			if (glops->go_sync)
				glops->go_sync(gl, DIO_METADATA | DIO_DATA);
		}

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) ||
	    !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) ||
	    !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}

static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}

/**
 * gfs2_glock_be_greedy -
 * @gl:
 * @time:
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time ||
	    gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
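
/*
 * Illustrative sketch, not part of the original file: the usual enqueue/
 * dequeue pattern with a stack-allocated holder.  The example function name
 * is hypothetical; gfs2_holder_init(), gfs2_glock_nq(), gfs2_holder_uninit()
 * and gfs2_glock_dq_uninit() are the routines defined above.
 */
#if 0
static int example_read_under_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);

	error = gfs2_glock_nq(&gh);
	if (error) {
		gfs2_holder_uninit(&gh);
		return error;
	}

	/* ... read data protected by the glock ... */

	gfs2_glock_dq_uninit(&gh);	/* dequeue and drop the holder's glock ref */
	return 0;
}
#endif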
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
		      struct gfs2_glock_operations *glops, unsigned int state,
		      int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}
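
/*
 * Illustrative sketch, not part of the original file: acquiring two glocks
 * in deadlock-free order via gfs2_glock_nq_m() and releasing both with
 * gfs2_glock_dq_uninit_m().  The example function name and the "gl_a"/"gl_b"
 * parameters are hypothetical.
 */
#if 0
static int example_lock_two_glocks(struct gfs2_glock *gl_a,
				   struct gfs2_glock *gl_b)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);

	error = gfs2_glock_nq_m(2, ghs);
	if (error) {
		gfs2_holder_uninit(&ghs[0]);
		gfs2_holder_uninit(&ghs[1]);
		return error;
	}

	/* ... both glocks are held here ... */

	gfs2_glock_dq_uninit_m(2, ghs);
	return 0;
}
#endif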
/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
  1313. *
  1314. * Returns: errno
  1315. */
  1316. void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
  1317. struct gfs2_glock_operations *glops,
  1318. unsigned int state, int flags)
  1319. {
  1320. struct gfs2_glock *gl;
  1321. int error;
  1322. if (atomic_read(&sdp->sd_reclaim_count) <
  1323. gfs2_tune_get(sdp, gt_reclaim_limit)) {
  1324. error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
  1325. if (!error) {
  1326. gfs2_glock_prefetch(gl, state, flags);
  1327. gfs2_glock_put(gl);
  1328. }
  1329. }
  1330. }
  1331. /**
  1332. * gfs2_lvb_hold - attach a LVB from a glock
  1333. * @gl: The glock in question
  1334. *
  1335. */
  1336. int gfs2_lvb_hold(struct gfs2_glock *gl)
  1337. {
  1338. int error;
  1339. gfs2_glmutex_lock(gl);
  1340. if (!atomic_read(&gl->gl_lvb_count)) {
  1341. error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
  1342. if (error) {
  1343. gfs2_glmutex_unlock(gl);
  1344. return error;
  1345. }
  1346. gfs2_glock_hold(gl);
  1347. }
  1348. atomic_inc(&gl->gl_lvb_count);
  1349. gfs2_glmutex_unlock(gl);
  1350. return 0;
  1351. }
  1352. /**
  1353. * gfs2_lvb_unhold - detach a LVB from a glock
  1354. * @gl: The glock in question
  1355. *
  1356. */
  1357. void gfs2_lvb_unhold(struct gfs2_glock *gl)
  1358. {
  1359. gfs2_glock_hold(gl);
  1360. gfs2_glmutex_lock(gl);
  1361. gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
  1362. if (atomic_dec_and_test(&gl->gl_lvb_count)) {
  1363. gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
  1364. gl->gl_lvb = NULL;
  1365. gfs2_glock_put(gl);
  1366. }
  1367. gfs2_glmutex_unlock(gl);
  1368. gfs2_glock_put(gl);
  1369. }
  1370. #if 0
  1371. void gfs2_lvb_sync(struct gfs2_glock *gl)
  1372. {
  1373. gfs2_glmutex_lock(gl);
  1374. gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
  1375. if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
  1376. gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
  1377. gfs2_glmutex_unlock(gl);
  1378. }
  1379. #endif /* 0 */
  1380. static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
  1381. unsigned int state)
  1382. {
  1383. struct gfs2_glock *gl;
  1384. gl = gfs2_glock_find(sdp, name);
  1385. if (!gl)
  1386. return;
  1387. if (gl->gl_ops->go_callback)
  1388. gl->gl_ops->go_callback(gl, state);
  1389. handle_callback(gl, state);
  1390. spin_lock(&gl->gl_spin);
  1391. run_queue(gl);
  1392. spin_unlock(&gl->gl_spin);
  1393. gfs2_glock_put(gl);
  1394. }
  1395. /**
  1396. * gfs2_glock_cb - Callback used by locking module
  1397. * @fsdata: Pointer to the superblock
  1398. * @type: Type of callback
  1399. * @data: Type dependent data pointer
  1400. *
  1401. * Called by the locking module when it wants to tell us something.
  1402. * Either we need to drop a lock, one of our ASYNC requests completed, or
  1403. * a journal from another client needs to be recovered.
  1404. */
  1405. void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
  1406. {
  1407. struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;
  1408. switch (type) {
  1409. case LM_CB_NEED_E:
  1410. blocking_cb(sdp, data, LM_ST_UNLOCKED);
  1411. return;
  1412. case LM_CB_NEED_D:
  1413. blocking_cb(sdp, data, LM_ST_DEFERRED);
  1414. return;
  1415. case LM_CB_NEED_S:
  1416. blocking_cb(sdp, data, LM_ST_SHARED);
  1417. return;
  1418. case LM_CB_ASYNC: {
  1419. struct lm_async_cb *async = data;
  1420. struct gfs2_glock *gl;
  1421. gl = gfs2_glock_find(sdp, &async->lc_name);
  1422. if (gfs2_assert_warn(sdp, gl))
  1423. return;
  1424. if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
  1425. gl->gl_req_bh(gl, async->lc_ret);
  1426. gfs2_glock_put(gl);
  1427. return;
  1428. }
  1429. case LM_CB_NEED_RECOVERY:
  1430. gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
  1431. if (sdp->sd_recoverd_process)
  1432. wake_up_process(sdp->sd_recoverd_process);
  1433. return;
  1434. case LM_CB_DROPLOCKS:
  1435. gfs2_gl_hash_clear(sdp, NO_WAIT);
  1436. gfs2_quota_scan(sdp);
  1437. return;
  1438. default:
  1439. gfs2_assert_warn(sdp, 0);
  1440. return;
  1441. }
  1442. }
  1443. /**
  1444. * gfs2_try_toss_inode - try to remove a particular inode struct from cache
  1445. * sdp: the filesystem
  1446. * inum: the inode number
  1447. *
  1448. */
  1449. void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
  1450. {
  1451. struct gfs2_glock *gl;
  1452. struct gfs2_inode *ip;
  1453. int error;
  1454. error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
  1455. NO_CREATE, &gl);
  1456. if (error || !gl)
  1457. return;
  1458. if (!gfs2_glmutex_trylock(gl))
  1459. goto out;
  1460. ip = gl->gl_object;
  1461. if (!ip)
  1462. goto out_unlock;
  1463. if (atomic_read(&ip->i_count))
  1464. goto out_unlock;
  1465. gfs2_inode_destroy(ip, 1);
  1466. out_unlock:
  1467. gfs2_glmutex_unlock(gl);
  1468. out:
  1469. gfs2_glock_put(gl);
  1470. }
/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 *
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
        struct gfs2_glock *i_gl;

        if (state != LM_ST_UNLOCKED)
                return;

        spin_lock(&io_gl->gl_spin);
        i_gl = io_gl->gl_object;
        if (i_gl) {
                gfs2_glock_hold(i_gl);
                spin_unlock(&io_gl->gl_spin);
        } else {
                spin_unlock(&io_gl->gl_spin);
                return;
        }

        if (gfs2_glmutex_trylock(i_gl)) {
                struct gfs2_inode *ip = i_gl->gl_object;
                if (ip) {
                        gfs2_try_toss_vnode(ip);
                        gfs2_glmutex_unlock(i_gl);
                        gfs2_glock_schedule_for_reclaim(i_gl);
                        goto out;
                }
                gfs2_glmutex_unlock(i_gl);
        }

 out:
        gfs2_glock_put(i_gl);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
                demote = time_after_eq(jiffies,
                                       gl->gl_stamp +
                                       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

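/*
 * Sketch (illustrative only, not the implementation in glops.c): the shape of
 * a per-type go_demote_ok hook consulted by demote_ok() above.  A glock type
 * can veto demotion while it still has useful state attached; everything
 * below the signature is a made-up example.
 */
#if 0
static int example_go_demote_ok(struct gfs2_glock *gl)
{
        /* Hold on to the lock while an object is still attached to it. */
        return gl->gl_object == NULL;
}
#endif
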
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = gl->gl_object;
                        if (ip && !atomic_read(&ip->i_count))
                                gfs2_inode_destroy(ip, 1);
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

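/*
 * Sketch (illustrative only): how a reclaim daemon such as gfs2_glockd()
 * (which lives outside this file) is expected to drive the reclaim list:
 * wake up on sd_reclaim_wq, drain sd_reclaim_count, and sleep again.  The
 * loop below is a simplified assumption, not the daemon's actual code.
 */
#if 0
        while (!kthread_should_stop()) {
                while (atomic_read(&sdp->sd_reclaim_count))
                        gfs2_reclaim_glock(sdp);
                wait_event_interruptible(sdp->sd_reclaim_wq,
                                         atomic_read(&sdp->sd_reclaim_count) ||
                                         kthread_should_stop());
        }
#endif
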
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          struct gfs2_gl_hash_bucket *bucket)
{
        struct glock_plug plug;
        struct list_head *tmp;
        struct gfs2_glock *gl;
        int entries;

        /* Add "plug" to end of bucket list, work back up list from there */

        memset(&plug.gl_flags, 0, sizeof(unsigned long));
        set_bit(GLF_PLUG, &plug.gl_flags);

        write_lock(&bucket->hb_lock);
        list_add(&plug.gl_list, &bucket->hb_list);
        write_unlock(&bucket->hb_lock);

        for (;;) {
                write_lock(&bucket->hb_lock);

                for (;;) {
                        tmp = plug.gl_list.next;

                        if (tmp == &bucket->hb_list) {
                                list_del(&plug.gl_list);
                                entries = !list_empty(&bucket->hb_list);
                                write_unlock(&bucket->hb_lock);
                                return entries;
                        }
                        gl = list_entry(tmp, struct gfs2_glock, gl_list);

                        /* Move plug up list */
                        list_move(&plug.gl_list, &gl->gl_list);

                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        /* examiner() must glock_put() */
                        gfs2_glock_hold(gl);

                        break;
                }

                write_unlock(&bucket->hb_lock);

                examiner(gl);
        }
}

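/*
 * Sketch (illustrative only): the contract an examiner passed to
 * examine_bucket() must follow.  It is called without the bucket lock held
 * and with a reference already taken on the glock, so it may sleep, and it
 * must drop that reference itself -- compare scan_glock() and clear_glock()
 * below.  The function name here is made up.
 */
#if 0
static void example_examiner(struct gfs2_glock *gl)
{
        /* ... inspect, demote, or schedule the glock for reclaim ... */

        gfs2_glock_put(gl);     /* mandatory: examine_bucket() held a ref */
}
#endif
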
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = gl->gl_object;
                        if (ip && !atomic_read(&ip->i_count))
                                goto out_schedule;
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        goto out_schedule;

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);

        return;

 out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
        gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
                cond_resched();
        }
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = gl->gl_object;
                        if (ip && !atomic_read(&ip->i_count))
                                gfs2_inode_destroy(ip, 1);
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;

                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        if (examine_bucket(clear_glock, sdp,
                                           &sdp->sd_gl_hash[x]))
                                cont = 1;

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                /* invalidate_inodes() requires that the sb inodes list
                   not change, but an async completion callback for an
                   unlock can occur which does glock_put() which
                   can call iput() which will change the sb inodes list.
                   invalidate_inodes_mutex prevents glock_put()'s during
                   an invalidate_inodes() */

                mutex_lock(&sdp->sd_invalidate_inodes_mutex);
                invalidate_inodes(sdp->sd_vfs);
                mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
                msleep(10);
        }
}

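/*
 * Sketch (illustrative only): the two expected kinds of call site for
 * gfs2_gl_hash_clear().  The DROPLOCKS case appears in gfs2_glock_cb()
 * above; the unmount path is assumed to pass a nonzero @wait so that every
 * glock is actually gone before the superblock is torn down.
 */
#if 0
        gfs2_gl_hash_clear(sdp, NO_WAIT);  /* lock manager asked us to shed locks */
        gfs2_gl_hash_clear(sdp, 1);        /* unmount: wait for the table to empty */
#endif
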
/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO " %s\n", str);
        printk(KERN_INFO " owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO " gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO " gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO " error = %d\n", gh->gh_error);
        printk(KERN_INFO " gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk(" \n");
        print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip);

        error = 0;

        return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO " Inode:\n");
        printk(KERN_INFO " num = %llu %llu\n",
               ip->i_num.no_formal_ino, ip->i_num.no_addr);
        printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode));
        printk(KERN_INFO " i_count = %d\n", atomic_read(&ip->i_count));
        printk(KERN_INFO " i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO " vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

        error = 0;

        return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock (%u, %llu)\n",
               gl->gl_name.ln_type,
               gl->gl_name.ln_number);
        printk(KERN_INFO " gl_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
        printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
        printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO " object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO " le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO " reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO " aspace = %lu\n",
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO " aspace = no\n");
        printk(KERN_INFO " ail = %d\n", atomic_read(&gl->gl_ail_count));

        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        printk(KERN_INFO " Inode: busy\n");
                }
        }

        error = 0;

 out:
        spin_unlock(&gl->gl_spin);

        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock in the hash table to the console via
 * printk(), skipping the plug entries used by bucket walkers.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_gl_hash_bucket *bucket;
        struct gfs2_glock *gl;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                bucket = &sdp->sd_gl_hash[x];

                read_lock(&bucket->hb_lock);

                list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(&bucket->hb_lock);

                if (error)
                        break;
        }

        return error;
}