glock.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
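
/*
 * Defining CREATE_TRACE_POINTS just before including trace_gfs2.h above
 * makes this the single translation unit in which the gfs2 tracepoints
 * are instantiated rather than merely declared.
 */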

struct gfs2_glock_iter {
        int hash;                       /* hash bucket index           */
        unsigned nhash;                 /* Index within current bucket */
        struct gfs2_sbd *sdp;           /* incore superblock           */
        struct gfs2_glock *gl;          /* current glock struct        */
        loff_t last_pos;                /* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */
static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
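
/*
 * Each hash bucket is protected by the bit spinlock embedded in the
 * low bit of its hlist_bl_head pointer, so bucket-level exclusion
 * needs no storage beyond the table itself.
 */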
static inline void spin_lock_bucket(unsigned int hash)
{
        hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
        hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
        struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

        if (gl->gl_ops->go_flags & GLOF_ASPACE)
                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
        else
                kmem_cache_free(gfs2_glock_cachep, gl);
}
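
/*
 * gfs2_glock_dealloc() above does the actual freeing; gfs2_glock_free()
 * below defers it through call_rcu() so that lockless hash walkers
 * under rcu_read_lock() never touch a freed glock.
 */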
void gfs2_glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
        if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */
void gfs2_glock_hold(struct gfs2_glock *gl)
{
        GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
        atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int demote_ok(const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gl->gl_state == LM_ST_UNLOCKED)
                return 0;
        if (!list_empty(&gl->gl_holders))
                return 0;
        if (glops->go_demote_ok)
                return glops->go_demote_ok(gl);
        return 1;
}
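
/*
 * Glocks that pass demote_ok() are kept on a global LRU so that the
 * shrinker can reclaim them under memory pressure. The GLF_LRU flag
 * mirrors list membership and lru_count mirrors the list length, so
 * both can be consulted without walking the list.
 */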
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
        spin_lock(&lru_lock);
        if (!list_empty(&gl->gl_lru))
                list_del_init(&gl->gl_lru);
        else
                atomic_inc(&lru_count);
        list_add_tail(&gl->gl_lru, &lru_list);
        set_bit(GLF_LRU, &gl->gl_flags);
        spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_lru)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
                clear_bit(GLF_LRU, &gl->gl_flags);
        }
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
        spin_lock(&lru_lock);
        __gfs2_glock_remove_from_lru(gl);
        spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */
static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        if (demote_ok(gl))
                gfs2_glock_add_to_lru(gl);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */
void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
        if (atomic_dec_and_test(&gl->gl_ref))
                GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */
void gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);

        if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
                __gfs2_glock_remove_from_lru(gl);
                spin_unlock(&lru_lock);
                spin_lock_bucket(gl->gl_hash);
                hlist_bl_del_rcu(&gl->gl_list);
                spin_unlock_bucket(gl->gl_hash);
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
                trace_gfs2_glock_put(gl);
                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
        }
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */
static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_bl_node *h;

        hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;
                if (atomic_inc_not_zero(&gl->gl_ref))
                        return gl;
        }

        return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);

        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
                return 0;
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
                return 0;
        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
                        return 1;
                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
                        return 1;
        }
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
        return 0;
}
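
/*
 * The test against gh_head above relies on the ordering of gl_holders:
 * granted holders stay at the front of the list and waiters queue up
 * behind them, so the first entry is always the head of the grant
 * queue.
 */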

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */
static inline void do_error(struct gfs2_glock *gl, const int ret)
{
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (ret & LM_OUT_ERROR)
                        gh->gh_error = -EIO;
                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        gh->gh_error = GLR_TRYFAILED;
                else
                        continue;
                list_del_init(&gh->gh_list);
                trace_gfs2_glock_queue(gh, 0);
                gfs2_holder_wake(gh);
        }
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh, *tmp;
        int ret;

restart:
        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (may_grant(gl, gh)) {
                        if (gh->gh_list.prev == &gl->gl_holders &&
                            glops->go_lock) {
                                spin_unlock(&gl->gl_spin);
                                /* FIXME: eliminate this eventually */
                                ret = glops->go_lock(gh);
                                spin_lock(&gl->gl_spin);
                                if (ret) {
                                        if (ret == 1)
                                                return 2;
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
                                        trace_gfs2_glock_queue(gh, 0);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
                                trace_gfs2_promote(gh, 1);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        trace_gfs2_promote(gh, 0);
                        gfs2_holder_wake(gh);
                        continue;
                }
                if (gh->gh_list.prev == &gl->gl_holders)
                        return 1;
                do_error(gl, 0);
                break;
        }
        return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put_nolock(gl);
        }
        if (held1 && held2 && list_empty(&gl->gl_holders))
                clear_bit(GLF_QUEUED, &gl->gl_flags);

        if (new_state != gl->gl_target)
                /* shorten our minimum hold time */
                gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
                                       GL_GLOCK_MIN_HOLD);
        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}
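
/*
 * Note that via state_change() a glock holds a reference to itself
 * whenever it is not unlocked (the held1/held2 logic above); that
 * reference is what keeps a cached but currently unused glock alive.
 */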

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
        int rv;

        spin_lock(&gl->gl_spin);
        trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);

        /* Demote to UN request arrived during demote to SH or DF */
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
                gl->gl_target = LM_ST_UNLOCKED;

        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
                        }
                        /* Some error or failed "try lock" - report it */
                        if ((ret & LM_OUT_ERROR) ||
                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                                gl->gl_target = gl->gl_state;
                                do_error(gl, ret);
                                goto out;
                        }
                }
                switch(state) {
                /* Unlocked due to conversion deadlock, try again */
                case LM_ST_UNLOCKED:
retry:
                        do_xmote(gl, gh, gl->gl_target);
                        break;
                /* Conversion fails, unlock and try again */
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        do_xmote(gl, gh, LM_ST_UNLOCKED);
                        break;
                default: /* Everything else */
                        printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
                spin_unlock(&gl->gl_spin);
                return;
        }

        /* Fast path - we got what we asked for */
        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        spin_unlock(&gl->gl_spin);
                        rv = glops->go_xmote_bh(gl, gh);
                        spin_lock(&gl->gl_spin);
                        if (rv) {
                                do_error(gl, rv);
                                goto out;
                        }
                }
                rv = do_promote(gl);
                if (rv == 2)
                        goto out_locked;
        }
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
        spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int lck_flags = gh ? gh->gh_flags : 0;
        int ret;

        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                      LM_FLAG_PRIORITY);
        GLOCK_BUG_ON(gl, gl->gl_state == target);
        GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
            glops->go_inval) {
                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                do_error(gl, 0); /* Fail queued try locks */
        }
        gl->gl_req = target;
        set_bit(GLF_BLOCKING, &gl->gl_flags);
        if ((gl->gl_req == LM_ST_UNLOCKED) ||
            (gl->gl_state == LM_ST_EXCLUSIVE) ||
            (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
                clear_bit(GLF_BLOCKING, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);
        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

        gfs2_glock_hold(gl);
        if (sdp->sd_lockstruct.ls_ops->lm_lock) {
                /* lock_dlm */
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
                GLOCK_BUG_ON(gl, ret);
        } else { /* lock_nolock */
                finish_xmote(gl, target);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
        }

        spin_lock(&gl->gl_spin);
}
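
/*
 * do_xmote(), like do_promote() and run_queue(), temporarily drops
 * gl_spin around calls out to the glops and the lock module; the
 * __releases()/__acquires() annotations exist to tell sparse about
 * that locking pattern.
 */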

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        struct gfs2_holder *gh = NULL;
        int ret;

        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
                        goto out_unlock;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                ret = do_promote(gl);
                if (ret == 0)
                        goto out_unlock;
                if (ret == 2)
                        goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
out:
        return;

out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_clear_bit();
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put_nolock(gl);
        return;

out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_clear_bit();
        return;
}

static void delete_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip;
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;

        ip = gl->gl_object;
        /* Note: Unsafe to dereference ip as we don't hold right refs/locks */

        if (ip)
                inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
        else
                inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
        if (inode && !IS_ERR(inode)) {
                d_prune_aliases(inode);
                iput(inode);
        }
        gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
        int drop_ref = 0;

        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                finish_xmote(gl, gl->gl_reply);
                drop_ref = 1;
        }
        spin_lock(&gl->gl_spin);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;

                holdtime = gl->gl_tchange + gl->gl_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;

                if (!delay) {
                        clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
                        set_bit(GLF_DEMOTE, &gl->gl_flags);
                }
        }
        run_queue(gl, 0);
        spin_unlock(&gl->gl_spin);
        if (!delay)
                gfs2_glock_put(gl);
        else {
                if (gl->gl_name.ln_type != LM_TYPE_INODE)
                        delay = 0;
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                        gfs2_glock_put(gl);
        }
        if (drop_ref)
                gfs2_glock_put(gl);
}
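
/*
 * Reference counting convention for the workqueue: whoever queues
 * gl_work takes a glock reference on its behalf, and glock_work_func()
 * drops it when the work runs. If queue_delayed_work() returns 0 the
 * work was already pending, so the would-be queuer drops the reference
 * it just took.
 */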

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct super_block *s = sdp->sd_vfs;
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        struct address_space *mapping;
        struct kmem_cache *cachep;

        rcu_read_lock();
        gl = search_bucket(hash, sdp, &name);
        rcu_read_unlock();

        *glp = gl;
        if (gl)
                return 0;
        if (!create)
                return -ENOENT;

        if (glops->go_flags & GLOF_ASPACE)
                cachep = gfs2_glock_aspace_cachep;
        else
                cachep = gfs2_glock_cachep;
        gl = kmem_cache_alloc(cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        atomic_inc(&sdp->sd_glock_disposal);
        gl->gl_sbd = sdp;
        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_ops = glops;
        gl->gl_dstamp = ktime_set(0, 0);
        preempt_disable();
        /* We use the global stats to estimate the initial per-glock stats */
        gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
        preempt_enable();
        gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
        gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
        memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
        memset(gl->gl_lvb, 0, 32 * sizeof(char));
        gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
        INIT_WORK(&gl->gl_delete, delete_work_func);

        mapping = gfs2_glock2aspace(gl);
        if (mapping) {
                mapping->a_ops = &gfs2_meta_aops;
                mapping->host = s->s_bdev->bd_inode;
                mapping->flags = 0;
                mapping_set_gfp_mask(mapping, GFP_NOFS);
                mapping->assoc_mapping = NULL;
                mapping->backing_dev_info = s->s_bdi;
                mapping->writeback_index = 0;
        }

        spin_lock_bucket(hash);
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                spin_unlock_bucket(hash);
                kmem_cache_free(cachep, gl);
                atomic_dec(&sdp->sd_glock_disposal);
                gl = tmp;
        } else {
                hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
                spin_unlock_bucket(hash);
        }

        *glp = gl;

        return 0;
}
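
/*
 * gfs2_glock_get() is the usual optimistic-creation pattern: a
 * lockless RCU search first and, only if a new glock had to be
 * allocated, a second search under the bucket lock to catch a racing
 * creator. The loser of that race frees its candidate and returns the
 * winner's glock instead.
 */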

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */
void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        if (gh->gh_owner_pid)
                put_pid(gh->gh_owner_pid);
        gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}
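
/*
 * Typical calling sequence for the holder API above (a sketch;
 * gfs2_glock_nq_init() is the glock.h helper that combines
 * gfs2_holder_init() with gfs2_glock_nq(), as used by
 * gfs2_glock_nq_num() further down):
 *
 *        struct gfs2_holder gh;
 *        int error;
 *
 *        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *        if (error)
 *                return error;
 *        ... use the object protected by gl ...
 *        gfs2_glock_dq_uninit(&gh);
 */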

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */
static int gfs2_glock_holder_wait(void *word)
{
        schedule();
        return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        unsigned long time1 = jiffies;

        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
        if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
                /* Lengthen the minimum hold time. */
                gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
                                              GL_GLOCK_HOLD_INCR,
                                              GL_GLOCK_MAX_HOLD);
}
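
/*
 * Together with state_change(), this forms a simple feedback loop on
 * the minimum hold time: waits of more than a second bump gl_hold_time
 * up by GL_GLOCK_HOLD_INCR, while landing in a state other than the
 * target trims it by GL_GLOCK_HOLD_DECR.
 */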

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately, nonzero to mark a pending demote
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                   gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl);
        trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */
int gfs2_glock_wait(struct gfs2_holder *gh)
{
        wait_on_holder(gh);
        return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        if (seq) {
                seq_vprintf(seq, fmt, args);
        } else {
                vaf.fmt = fmt;
                vaf.va = &args;

                printk(KERN_ERR " %pV", &vaf);
        }

        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *insert_pt = NULL;
        struct gfs2_holder *gh2;
        int try_lock = 0;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        try_lock = 1;
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
        }

        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
                        goto trap_recursive;
                if (try_lock &&
                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
                    !may_grant(gl, gh)) {
fail:
                        gh->gh_error = GLR_TRYFAILED;
                        gfs2_holder_wake(gh);
                        return;
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
        set_bit(GLF_QUEUED, &gl->gl_flags);
        trace_gfs2_glock_queue(gh, 1);
        gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
        gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
                spin_unlock(&gl->gl_spin);
                if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                        sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
                spin_lock(&gl->gl_spin);
        }
        return;

trap_recursive:
        print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
        print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh->gh_gl->gl_name.ln_type, gh->gh_state);
        __dump_glock(NULL, gl);
        BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */
int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        if ((LM_FLAG_NOEXP & gh->gh_flags) &&
            test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC))
                error = gfs2_glock_wait(gh);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */
int gfs2_glock_poll(struct gfs2_holder *gh)
{
        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */
void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;
        int fast_path = 0;

        spin_lock(&gl->gl_spin);
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0);

        list_del_init(&gh->gh_list);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                }
                if (list_empty(&gl->gl_holders) &&
                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
        if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
                __gfs2_glock_schedule_for_reclaim(gl);
        trace_gfs2_glock_queue(gh, 0);
        spin_unlock(&gl->gl_spin);
        if (likely(fast_path))
                return;

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_name.ln_type == LM_TYPE_INODE)
                delay = gl->gl_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */
static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}
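
/*
 * Falling through to the BUG_ON() above is safe because two distinct
 * glocks with the same lock number must differ in type; an exact
 * duplicate would mean the same glock was queued twice, which
 * add_to_queue() traps as recursive locking.
 */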

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, filled and sorted here
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        while (num_gh--)
                gfs2_glock_dq(&ghs[num_gh]);
}

/**
 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        while (num_gh--)
                gfs2_glock_dq_uninit(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gfs2_glock_hold(gl);
        holdtime = gl->gl_tchange + gl->gl_hold_time;
        if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
            gl->gl_name.ln_type == LM_TYPE_INODE) {
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                        delay = gl->gl_hold_time;
        }

        spin_lock(&gl->gl_spin);
        handle_callback(gl, state, delay);
        spin_unlock(&gl->gl_spin);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
        const struct gfs2_holder *gh;

        if (gl->gl_reply & ~LM_OUT_ST_MASK)
                return 0;
        if (gl->gl_target == LM_ST_UNLOCKED)
                return 0;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (LM_FLAG_NOEXP & gh->gh_flags)
                        return 0;
        }

        return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
        struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

        spin_lock(&gl->gl_spin);
        gl->gl_reply = ret;

        if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
                if (gfs2_should_freeze(gl)) {
                        set_bit(GLF_FROZEN, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        }

        spin_unlock(&gl->gl_spin);
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        smp_wmb();
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

static int gfs2_shrink_glock_memory(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        struct gfs2_glock *gl;
        int may_demote;
        int nr_skipped = 0;
        int nr = sc->nr_to_scan;
        gfp_t gfp_mask = sc->gfp_mask;
        LIST_HEAD(skipped);

        if (nr == 0)
                goto out;

        if (!(gfp_mask & __GFP_FS))
                return -1;

        spin_lock(&lru_lock);
        while(nr && !list_empty(&lru_list)) {
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                clear_bit(GLF_LRU, &gl->gl_flags);
                atomic_dec(&lru_count);

                /* Test for being demotable */
                if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                        gfs2_glock_hold(gl);
                        spin_unlock(&lru_lock);
                        spin_lock(&gl->gl_spin);
                        may_demote = demote_ok(gl);
                        if (may_demote) {
                                handle_callback(gl, LM_ST_UNLOCKED, 0);
                                nr--;
                        }
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                        smp_mb__after_clear_bit();
                        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                                gfs2_glock_put_nolock(gl);
                        spin_unlock(&gl->gl_spin);
                        spin_lock(&lru_lock);
                        continue;
                }
                nr_skipped++;
                list_add(&gl->gl_lru, &skipped);
                set_bit(GLF_LRU, &gl->gl_flags);
        }
        list_splice(&skipped, &lru_list);
        atomic_add(nr_skipped, &lru_count);
        spin_unlock(&lru_lock);
out:
        return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}
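
/*
 * Shrinker contract (the older single-callback API): returning -1
 * tells the VM to back off because the allocation cannot recurse into
 * the filesystem (!__GFP_FS); otherwise the return value approximates
 * the number of reclaimable glocks, scaled by sysctl_vfs_cache_pressure
 * in the same way as the dcache shrinker.
 */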

static struct shrinker glock_shrinker = {
        .shrink = gfs2_shrink_glock_memory,
        .seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 */
static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
                           unsigned int hash)
{
        struct gfs2_glock *gl;
        struct hlist_bl_head *head = &gl_hash_table[hash];
        struct hlist_bl_node *pos;

        rcu_read_lock();
        hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
                if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
                        examiner(gl);
        }
        rcu_read_unlock();
        cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
        unsigned x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */
static void thaw_glock(struct gfs2_glock *gl)
{
        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
                return;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */
static void clear_glock(struct gfs2_glock *gl)
{
        gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_spin);
        if (gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
        glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
        int ret;

        spin_lock(&gl->gl_spin);
        ret = __dump_glock(seq, gl);
        spin_unlock(&gl->gl_spin);

        return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
        dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        glock_hash_walk(clear_glock, sdp);
        flush_workqueue(glock_workqueue);
        wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
        glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
        struct gfs2_glock *gl = ip->i_gl;
        int ret;

        ret = gfs2_truncatei_resume(ip);
        gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
        char *p = buf;

        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
        struct task_struct *gh_owner = NULL;
        char flags_buf[32];

        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
                       state2str(gh->gh_state),
                       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                       gh->gh_error,
                       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                       gh_owner ? gh_owner->comm : "(ended)",
                       (void *)gh->gh_ip);
        return 0;
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
        const unsigned long *gflags = &gl->gl_flags;
        char *p = buf;

        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
                *p++ = 'i';
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        if (test_bit(GLF_INITIAL, gflags))
                *p++ = 'I';
        if (test_bit(GLF_FROZEN, gflags))
                *p++ = 'F';
        if (test_bit(GLF_QUEUED, gflags))
                *p++ = 'q';
        if (test_bit(GLF_LRU, gflags))
                *p++ = 'L';
        if (gl->gl_object)
                *p++ = 'o';
        if (test_bit(GLF_BLOCKING, gflags))
                *p++ = 'b';
        *p = 0;
        return buf;
}
/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       atomic_read(&gl->gl_ref), gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

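/*
 * For illustration (all values invented), a shared inode glock with a
 * single holder would appear in the glocks file as:
 *
 * G: s:SH n:2/27bc f:qob t:SH d:UN/0 a:0 v:0 r:2 m:10
 *  H: s:SH f:EH e:0 p:4466 [cat] gfs2_open+0x8a/0x120
 *
 * following the field key in the comment above __dump_glock().
 */
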
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

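/*
 * A single line of the glstats file might read (numbers invented for
 * illustration):
 *
 * G: n:2/27bc rtt:3000/1200 rttb:4500/2000 irt:80000/35000 dcnt: 17 qcnt: 22
 *
 * Going by the stat names, the rtt/rttb pairs are the smoothed round trip
 * time and its variance for non-blocking and blocking requests, irt is the
 * smoothed inter-request time, and dcnt/qcnt count DLM requests and glock
 * queue operations.
 */
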
static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	struct gfs2_sbd *sdp = gi->sdp;
	unsigned index = gi->hash >> 3;
	unsigned subindex = gi->hash & 0x07;
	s64 value;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
		if (index == 0) {
			value = i;
		} else {
			value = lkstats->lkstats[index - 1].stats[subindex];
		}
		seq_printf(seq, " %15lld", (long long)value);
	}
	seq_putc(seq, '\n');
	return 0;
}

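/*
 * The sbstats file is thus a table with one row per (glock type, statistic)
 * pair and one column per possible CPU. A hypothetical two-CPU excerpt:
 *
 * type            cpu:               0               1
 * inode          srtt:            3000            2800
 * inode       srttvar:            1200            1100
 * ...
 */
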
int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	/* alloc_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
			gi->nhash++;
		} else {
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}

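/*
 * Locking note: the walk runs entirely under rcu_read_lock(), taken in
 * gfs2_glock_seq_start() below. gfs2_glock_iter_next() drops the RCU read
 * lock itself when it runs off the end of the hash table (and returns 1);
 * otherwise gfs2_glock_seq_stop() drops it, which is why that function
 * only unlocks when gi->gl is still non-NULL.
 */
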
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	if (gi->last_pos <= *pos)
		n = gi->nhash + (*pos - gi->last_pos);
	else
		gi->hash = 0;
	gi->nhash = 0;

	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->hash = *pos;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	preempt_disable();
	return SEQ_START_TOKEN;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->hash++;
	if (gi->hash >= GFS2_NR_SBSTATS) {
		preempt_enable();
		return NULL;
	}
	return SEQ_START_TOKEN;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

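/*
 * Design note: preallocating a large seq_file buffer sidesteps seq_read()'s
 * usual grow-and-retry cycle (overflow the buffer, double it, restart the
 * traversal), which gets expensive when a filesystem holds many thousands
 * of glocks. The size is capped at 64KiB or the costly-order page
 * allocation limit, whichever is smaller; if the kmalloc below fails,
 * seq_file simply falls back to its default single-page buffer.
 */
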
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_glocks_fops);
	if (!sdp->debugfs_dentry_glocks)
		goto fail;

	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_glstats_fops);
	if (!sdp->debugfs_dentry_glstats)
		goto fail;

	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_sbstats_fops);
	if (!sdp->debugfs_dentry_sbstats)
		goto fail;

	return 0;

fail:
	gfs2_delete_debugfs_file(sdp);
	return -ENOMEM;
}

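/*
 * With debugfs mounted at the conventional /sys/kernel/debug, each mounted
 * filesystem is therefore represented by:
 *
 *   /sys/kernel/debug/gfs2/<table name>/glocks
 *   /sys/kernel/debug/gfs2/<table name>/glstats
 *   /sys/kernel/debug/gfs2/<table name>/sbstats
 */
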
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}