/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_glock_iter {
	int hash;			/* hash bucket index */
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct gfs2_glock *gl;		/* current glock struct */
	char string[512];		/* scratch space */
};
typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The glock name (number and type)
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
static inline void spin_lock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	bit_spin_lock(0, (unsigned long *)bl);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	__bit_spin_unlock(0, (unsigned long *)bl);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* assert_spin_locked(&gl->gl_spin); */

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
		return 0;
	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
	    !list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}
/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl)) {
		spin_lock(&lru_lock);

		if (!list_empty(&gl->gl_lru))
			list_del_init(&gl->gl_lru);
		else
			atomic_inc(&lru_count);

		list_add_tail(&gl->gl_lru, &lru_list);
		spin_unlock(&lru_lock);
	}
}

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	__gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}
/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
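/*
 * A minimal usage sketch (illustrative only): gfs2_glock_nq_init() in
 * glock.h combines gfs2_holder_init() with gfs2_glock_nq(), so callers
 * typically bracket access to the protected object like this:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access the object protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */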
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}
/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound output to the scratch buffer and emit it as plain
		   text rather than reinterpreting it as a format string */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
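/*
 * The asynchronous pattern these two interfaces support looks roughly
 * like the following (a sketch, not taken from a real caller). With
 * GL_ASYNC, gfs2_glock_nq() queues the holder and returns immediately:
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	... do other useful work ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */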
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_holder structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
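/*
 * Example (illustrative): taking the glocks of two inodes without
 * risking an ABBA deadlock, since nq_m_sync() sorts the holders by
 * lock number before acquiring them:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_uninit_m(2, ghs);
 */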
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq_uninit(&ghs[num_gh]);
}
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}
	spin_unlock(&gl->gl_spin);

	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}
static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;

	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);

	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}
static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	*p = 0;
	return buf;
}
/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, &gl->gl_flags),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
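/*
 * For illustration, a dump produced by the format above might look
 * like this (all values, including the caller symbol, are made up):
 *
 *	G: s:SH n:2/6102 f:q t:SH d:EX/0 a:0 r:4
 *	 H: s:SH f:H e:0 p:1453 [gfs2_quotad] example_caller+0x6a/0x350
 */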
int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	/* alloc_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}
static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
		} else {
			gi->gl = glock_hash_chain(gi->hash);
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};
static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}
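/*
 * With debugfs mounted at the usual location, the file created above
 * appears as /sys/kernel/debug/gfs2/<table_name>/glocks, where
 * <table_name> is the sdp->sd_table_name used for the directory above.
 */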
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}