glock.c

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
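
/* For example: with flags == 0, a lock already held in LM_ST_EXCLUSIVE
   satisfies a request for LM_ST_SHARED; GL_EXACT forbids that relaxation,
   and LM_FLAG_ANY accepts any state except LM_ST_UNLOCKED. */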

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The glock name, containing the lock number and type
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                BUG_ON(spin_is_locked(&gl->gl_spin));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;
        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);
        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                          struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;

        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(&bucket->hb_lock);
        /* Recheck under the write lock: another thread may have created
           and inserted the same glock while we were allocating ours. */
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
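
/* Illustrative sketch (not part of the original source): a caller would
   typically pair gfs2_glock_get() with gfs2_glock_put(), much as
   gfs2_glock_nq_num() does further below.  "sdp" and "number" are assumed
   to be in scope; use of the glock itself is elided.

        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
        if (!error) {
                ... use the glock structure ...
                gfs2_glock_put(gl);
        }
*/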

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags passed to kmalloc()
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
                                           unsigned int state,
                                           int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);

        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state,
                                           gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                complete(&gh.gh_wait);
        }
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
                if (!new_gh)
                        return;
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

void gfs2_glock_inode_squish(struct inode *inode)
{
        struct gfs2_holder gh;
        struct gfs2_glock *gl = GFS2_I(inode)->i_gl;

        gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);
        spin_lock(&gl->gl_spin);
        gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /* Deal with each possible exit condition */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
                dump_glock(gl);

        return error;
}
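
/* Illustrative sketch (not part of the original source): the common
   synchronous hold/release cycle built from this API.  "gl" is assumed
   to be a glock reference obtained elsewhere.

        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (!error) {
                ... access the object the glock protects ...
                gfs2_glock_dq(&gh);
        }
        gfs2_holder_uninit(&gh);
*/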

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that was requested with GL_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}
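
/* Illustrative sketch (not part of the original source): an asynchronous
   request is queued with GL_ASYNC (in which case gfs2_glock_nq() never
   returns an error), optionally polled, and finally collected with
   gfs2_glock_wait().

        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
        gfs2_glock_nq(&gh);
        while (!gfs2_glock_poll(&gh))
                ... do other work ...
        error = gfs2_glock_wait(&gh);
*/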

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
                                int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

static void greedy_work(void *data)
{
        struct greedy *gr = data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy - hold on to a glock for a while before honoring
 *                        demote requests
 * @gl: the glock
 * @time: how long to be greedy, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Sorts primarily by lock number; on a tie, shared requests and
 * non-GL_LOCAL_EXCL requests sort after exclusive ones.
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, filled and sorted here
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);

        return error;
}
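
/* Illustrative sketch (not part of the original source): acquiring and
   releasing two glocks together.  With more than one holder,
   gfs2_glock_nq_m() tries GL_ASYNC requests first and falls back to
   nq_m_sync(), which sorts by lock number to avoid deadlock.  "gl_a"
   and "gl_b" are assumed to be glock references obtained elsewhere.

        struct gfs2_holder ghs[2];
        int error;

        gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
        gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
        error = gfs2_glock_nq_m(2, ghs);
        if (!error)
                gfs2_glock_dq_m(2, ghs);
        gfs2_holder_uninit(&ghs[0]);
        gfs2_holder_uninit(&ghs[1]);
*/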

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks and uninitialize their holders
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
                             struct gfs2_glock_operations *glops,
                             unsigned int state, int flags)
{
        struct gfs2_glock *gl;
        int error;

        if (atomic_read(&sdp->sd_reclaim_count) <
            gfs2_tune_get(sdp, gt_reclaim_limit)) {
                error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
                if (!error) {
                        gfs2_glock_prefetch(gl, state, flags);
                        gfs2_glock_put(gl);
                }
        }
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

#if 0
void gfs2_lvb_sync(struct gfs2_glock *gl)
{
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
        if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
                gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

        gfs2_glmutex_unlock(gl);
}
#endif /* 0 */
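
/* Illustrative sketch (not part of the original source): pinning a lock
   value block around an access.  gl->gl_lvb remains valid between
   gfs2_lvb_hold() and the matching gfs2_lvb_unhold().

        if (!gfs2_lvb_hold(gl)) {
                ... read or update the buffer at gl->gl_lvb ...
                gfs2_lvb_unhold(gl);
        }
*/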

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, state);
        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 *
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
        if (state != LM_ST_UNLOCKED)
                return;
        /* FIXME: remove this? */
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Sticky glocks are never demoted; prefetched glocks may be demoted once
 * they have sat unused for gt_prefetch_secs; otherwise the decision is
 * deferred to the type-specific go_demote_ok() method.
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies,
				       gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);
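	/* Wake the reclaim daemon outside sd_reclaim_lock, so it
	   doesn't wake up just to block on the lock */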
	wake_up(&sdp->sd_reclaim_wq);
}
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from the gfs2_glockd() glock reclaim daemon, or when promoting
 * a different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);
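	/* Demote to UNLOCKED only if the glock is idle and demote_ok() agrees */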
	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  struct gfs2_gl_hash_bucket *bucket)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(&bucket->hb_lock);
	list_add(&plug.gl_list, &bucket->hb_list);
	write_unlock(&bucket->hb_lock);
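	/*
	 * The plug marks our position in the bucket so that hb_lock can
	 * be dropped while examiner() runs (it may sleep or put glocks);
	 * each pass moves the plug past the glock just handed to
	 * examiner(), so the walk resumes where it left off.
	 */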
	for (;;) {
		write_lock(&bucket->hb_lock);

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &bucket->hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&bucket->hb_list);
				write_unlock(&bucket->hb_lock);
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(&bucket->hb_lock);

		examiner(gl);
	}
}
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
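	/*
	 * Inode glocks are skipped here; any other idle glock that
	 * demote_ok() approves of gets queued for the reclaim daemon.
	 */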
	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops)
			goto out;
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;
out:
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);

	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}
/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
		cond_resched();
	}
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;
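	/*
	 * If the glock is queued for reclaim, pull it off the list and
	 * drop the list's reference.  It can't be the last reference,
	 * since we still hold the one examine_bucket() took for us.
	 */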
	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
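/*
 * Note the contrast with scan_glock() above: scan_glock() only schedules
 * demote-able glocks for reclaim, while clear_glock() demotes any idle
 * glock unconditionally, since it runs when the hash table must empty.
 */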
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when the inter-node lock
 * manager requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp,
					   &sdp->sd_gl_hash[x]))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		/*
		 * invalidate_inodes() requires that the sb inodes list
		 * not change, but an async completion callback for an
		 * unlock can occur which does glock_put() which can call
		 * iput() which will change the sb inodes list.
		 * invalidate_inodes_mutex prevents glock_put() calls
		 * during an invalidate_inodes().
		 */
		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
		invalidate_inodes(sdp->sd_vfs);
		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);

		msleep(10);
	}
}
/*
 *  Diagnostic routines to help debug distributed deadlock
 */
/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}
/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	printk(KERN_INFO "    num = %llu %llu\n",
	       (unsigned long long)ip->i_num.no_formal_ino,
	       (unsigned long long)ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");

	error = 0;

	return error;
}
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n",
	       gl,
	       gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO "  gl_owner = %s\n",
	       (gl->gl_owner) ? gl->gl_owner->comm : "-1");
	print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO "  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO "  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n",
		       gl->gl_aspace,
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO "  aspace = no\n");
	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));

	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO "  Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);

	return error;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock in the hash table to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_gl_hash_bucket *bucket;
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		bucket = &sdp->sd_gl_hash[x];

		read_lock(&bucket->hb_lock);

		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(&bucket->hb_lock);

		if (error)
			break;
	}

	return error;
}