glock.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct gfs2_glock_iter {
	int hash;		/* hash bucket index */
	struct gfs2_sbd *sdp;	/* incore superblock */
	struct gfs2_glock *gl;	/* current glock struct */
	char string[512];	/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ	512
# else
#  define GL_HASH_LOCK_SZ	256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}

#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct kmem_cache *cachep = gfs2_glock_cachep;

	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	if (mapping)
		cachep = gfs2_glock_aspace_cachep;
	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	int may_reclaim;
	may_reclaim = (demote_ok(gl) &&
		       (atomic_read(&gl->gl_ref) == 1 ||
			(gl->gl_name.ln_type == LM_TYPE_INODE &&
			 atomic_read(&gl->gl_ref) <= 2)));
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && may_reclaim) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}
/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
	gfs2_glock_schedule_for_reclaim(gl);
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		hlist_del(&gl->gl_list);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		write_unlock(gl_lock_addr(gl->gl_hash));
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	spin_lock(&gl->gl_spin);
	gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash of the bucket to search
 * @sdp: the filesystem the glock must belong to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}
/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}
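
/**
 * gfs2_demote_wake - reset the demote state and wake up any waiters
 * @gl: the glock
 *
 * Clears GLF_DEMOTE and wakes processes sleeping in wait_on_demote().
 */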
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}
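
/**
 * delete_work_func - look up and iput() the inode behind an inode glock
 * @work: the gl_delete work structure
 *
 * Looking the inode up (and then dropping the reference via iput)
 * allows the dentries of a deleted inode to be pruned and the final
 * eviction of the inode to proceed.
 */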
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}
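
/**
 * glock_work_func - the main glock workqueue function
 * @work: the gl_work (delayed) work structure
 *
 * Handles any pending reply from the DLM, turns a pending demote into
 * a real one once the minimum hold time has expired, and then runs
 * the queue.
 */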
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
	else
		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;
}
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, defer the demote by marking it as pending
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}
/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}
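
/**
 * gfs2_print_dbg - print either to a seq_file or, failing that, the console
 * @seq: the seq_file to print into (may be NULL)
 * @fmt: the printf-style format string
 */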
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsprintf(gi->string, fmt, args);
		seq_printf(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
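
/**
 * gfs2_glock_dq_wait - dequeue a holder and wait for any demote to complete
 * @gh: the glock holder
 */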
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks and uninitialize their holders
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}
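
/**
 * gfs2_glock_cb - the locking module requests a demote of a glock
 * @gl: the glock
 * @state: the state the remote node wants us to drop to
 *
 * Queues a (possibly delayed) demote, honouring the minimum hold time
 * for glocks which have recently been in use.
 */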
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}
/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		spin_lock(&gl->gl_spin);
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
		spin_unlock(&gl->gl_spin);
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
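
/**
 * gfs2_shrink_glock_memory - the glock shrinker callback
 * @shrink: the registered shrinker (unused)
 * @nr: the number of glocks the VM asks us to demote
 * @gfp_mask: the reclaim context flags
 *
 * Walks the glock LRU list demoting unused glocks; busy glocks are
 * skipped and put back on the list.
 *
 * Returns: a pressure-scaled estimate of the number of freeable glocks
 */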
static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}
static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash of the bucket to examine
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(thaw_glock, sdp, x);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem; waits until every glock
 * belonging to this filesystem has been disposed of.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(clear_glock, sdp, x);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	gfs2_dump_lockstate(sdp);
}
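
/**
 * gfs2_glock_finish_truncate - resume an interrupted truncate on an inode
 * @ip: the inode in question
 *
 * Once gfs2_truncatei_resume() has completed, GLF_LOCK is cleared and
 * the glock's queue is run again.
 */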
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}
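
/* state2str - turn a lock state into a two letter string for dump output */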
static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}
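
/* hflags2str - encode holder flags and iflags as a string of letters */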
static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}
static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object; capital letters are used to indicate objects:
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings, which are
 * enclosed in [] so that it is possible to see if they are composed of
 * spaces, for example. The fields are: n = number (id of the object),
 * f = flags, t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, &gl->gl_flags),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
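
/*
 * Putting the format together, a glock and one of its holders might dump
 * as (illustrative values only):
 *
 * G: s:SH n:2/27bc f:q t:SH d:EX/0 a:0 r:4
 *  H: s:SH f:EH e:0 p:4466 [cat] gfs2_getattr+0x8c/0xe0 [gfs2]
 *
 * Here n:2/27bc is glock type 2 (LM_TYPE_INODE) with lock number 0x27bc,
 * and the f: letter strings come from gflags2str()/hflags2str() above.
 */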

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;

	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * The glocks are dumped via dump_glock() with a NULL seq_file, which
 * sends the output to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}
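
/*
 * Note on the NULL seq_file above: the assumption here is that
 * gfs2_print_dbg() falls back to printk() when it is handed a NULL
 * seq_file pointer, which is what routes this dump to the console.
 */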

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	/*
	 * alloc_workqueue() returns NULL on failure, not an ERR_PTR, so
	 * testing the result with IS_ERR()/PTR_ERR() would silently miss
	 * allocation failures; check for NULL instead.
	 */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
	} else {
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
	}
	if (gi->gl)
		gfs2_glock_hold(gi->gl);
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	while (gi->gl == NULL) {
		gi->hash++;
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}
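
/*
 * Reference-counting sketch (not part of the driver): each successful
 * gfs2_glock_iter_next() returns with a reference held on gi->gl and has
 * already dropped the reference taken for the previous glock, so a full
 * walk looks roughly like:
 *
 *	while (gfs2_glock_iter_next(gi) == 0)
 *		dump_glock(seq, gi->gl);
 *	gfs2_glock_iter_free(gi);	// drops the last reference, if any
 */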

static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	gi->gl = NULL;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;

	do {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};
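
/*
 * For orientation, the seq_file core drives the ops above roughly like
 * this on each read (simplified sketch; the real loop in fs/seq_file.c
 * also handles buffer overflow and restart):
 *
 *	p = start(seq, &pos);
 *	while (p) {
 *		if (show(seq, p))
 *			break;
 *		p = next(seq, p, &pos);
 *	}
 *	stop(seq, p);
 */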

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}
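
/*
 * Usage sketch: assuming debugfs is mounted at its conventional
 * location, the per-filesystem glock dump created above can be read
 * from userspace with something like:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/gfs2/<sd_table_name>/glocks
 */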

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}