glock.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index */
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct gfs2_glock *gl;		/* current glock struct */
	char string[512];		/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT	15
#define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
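
/*
 * The hash table needs no separate lock array: each bucket is protected
 * by a bit spinlock taken on bit 0 of the hlist_bl_head pointer itself,
 * while lookups walk the chains under RCU (see search_bucket() below).
 */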
static inline void spin_lock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	bit_spin_lock(0, (unsigned long *)bl);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	__bit_spin_unlock(0, (unsigned long *)bl);
}

void gfs2_glock_free(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);

	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* assert_spin_locked(&gl->gl_spin); */

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
		return 0;
	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
	    !list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl)) {
		spin_lock(&lru_lock);

		if (!list_empty(&gl->gl_lru))
			list_del_init(&gl->gl_lru);
		else
			atomic_inc(&lru_count);

		list_add_tail(&gl->gl_lru, &lru_list);
		spin_unlock(&lru_lock);
	}
}

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	__gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
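
/*
 * In outline, may_grant() implements these rules: an EX request (or any
 * request while the head of the queue wants EX) is granted only to the
 * queue head; a request whose state matches the current glock state is
 * granted; GL_EXACT refuses anything but an exact match; while the glock
 * is held in EX, SH or DF requests may still be granted provided the
 * queue head was granted in that same state; and LM_FLAG_ANY accepts
 * any state other than unlocked.
 */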
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}
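
/*
 * Note that a glock holds a reference to itself for as long as it is in
 * any state other than LM_ST_UNLOCKED; state_change() takes or drops
 * that reference as the lock crosses the locked/unlocked boundary.
 */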
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The glock
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
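
/*
 * The LM_FLAG_TRY_1CB added above turns blocking lock conversions from
 * SH or DF into single-callback try locks, so a conversion that would
 * deadlock in the DLM comes back as LM_ST_UNLOCKED and finish_xmote()
 * simply retries the request from the unlocked state.
 */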
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}
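
/*
 * A recurring pattern above: each piece of work queued on
 * glock_workqueue owns one glock reference. Whoever queues the work
 * takes a reference first and drops it again if queue_delayed_work()
 * returns 0 (i.e. the work was already pending), while
 * glock_work_func() drops the reference once the work has run.
 */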
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		/* This glock never reaches gfs2_glock_free(), so undo the
		   disposal count taken above */
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;
	return 0;
}
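
/*
 * gfs2_glock_get() resolves creation races optimistically: the first
 * lookup runs under RCU only, and after allocating a new glock the
 * bucket lock is taken and the bucket searched again. If another CPU
 * won the race, the freshly allocated glock is simply freed and the
 * existing one used instead.
 */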
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
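
/*
 * A minimal usage sketch of the holder API (not taken from a real
 * caller; error handling elided). Callers normally go through the
 * gfs2_glock_nq_init() wrapper in glock.h, which combines the first
 * two steps:
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);	// blocks unless GL_ASYNC is set
 *	if (error == 0) {
 *		// ... access the object protected by gl ...
 *		gfs2_glock_dq_uninit(&gh);
 *	} else {
 *		gfs2_holder_uninit(&gh);
 *	}
 */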
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks and uninitialize the holders
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);

	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}
	spin_unlock(&gl->gl_spin);

	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}
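
/*
 * With the old ->shrink() interface used here, returning -1 tells the
 * VM to back off, which is what happens above when the allocation may
 * not recurse into the filesystem (!__GFP_FS). Otherwise the return
 * value estimates the reclaimable cache size: the LRU length scaled by
 * sysctl_vfs_cache_pressure (in units of 1/100th).
 */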
static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket number
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem; waits until all the glocks
 * have been disposed of.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, &gl->gl_flags),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	/* alloc_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference_raw(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
		} else {
			gi->gl = glock_hash_chain(gi->hash);
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}
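
/*
 * The debugfs iterator below holds rcu_read_lock() for the whole
 * lifetime of a read: gfs2_glock_seq_start() takes it, the ->next and
 * ->show callbacks run under it, and it is released either in
 * gfs2_glock_iter_next() above when the table is exhausted or in
 * gfs2_glock_seq_stop().
 */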
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}