/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
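
/*
 * For example, a holder asking for LM_ST_SHARED is satisfied by a
 * glock already held in LM_ST_EXCLUSIVE (a strictly more restrictive
 * mode) unless the holder passed GL_EXACT, and LM_FLAG_ANY accepts
 * any state except LM_ST_UNLOCKED.
 */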

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name (lock number and lock type)
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(uint64_t), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed, 0 otherwise
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
	int rv = 0;

	mutex_lock(&sdp->sd_invalidate_inodes_mutex);

	write_lock(&bucket->hb_lock);
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(&bucket->hb_lock);
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(&bucket->hb_lock);
out:
	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
	return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;

	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);

	return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
					struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &bucket->hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
					  struct lm_lockname *name)
{
	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
	struct gfs2_glock *gl;

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, name);
	read_unlock(&bucket->hb_lock);

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
		   struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name;
	struct gfs2_glock *gl, *tmp;
	struct gfs2_gl_hash_bucket *bucket;
	int error;

	name.ln_number = number;
	name.ln_type = glops->go_type;

	bucket = &sdp->sd_gl_hash[gl_hash(&name)];

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, &name);
	read_unlock(&bucket->hb_lock);

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(gl, 0, sizeof(struct gfs2_glock));

	INIT_LIST_HEAD(&gl->gl_list);
	gl->gl_name = name;
	kref_init(&gl->gl_ref);

	spin_lock_init(&gl->gl_spin);

	gl->gl_state = LM_ST_UNLOCKED;
	INIT_LIST_HEAD(&gl->gl_holders);
	INIT_LIST_HEAD(&gl->gl_waiters1);
	INIT_LIST_HEAD(&gl->gl_waiters2);
	INIT_LIST_HEAD(&gl->gl_waiters3);

	gl->gl_ops = glops;

	gl->gl_bucket = bucket;
	INIT_LIST_HEAD(&gl->gl_reclaim);

	gl->gl_sbd = sdp;

	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
	INIT_LIST_HEAD(&gl->gl_ail_list);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops ||
	    glops == &gfs2_rgrp_glops ||
	    glops == &gfs2_meta_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(&bucket->hb_lock);
	tmp = search_bucket(bucket, &name);
	if (tmp) {
		write_unlock(&bucket->hb_lock);
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &bucket->hb_list);
		write_unlock(&bucket->hb_lock);
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
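
/*
 * Typical use (sketch; the surrounding context and error handling are
 * illustrative), as in gfs2_glock_nq_num() below:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	... use the glock ...
 *	gfs2_glock_put(gl);
 */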

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags,
			struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);

	return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
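
/*
 * Note the strict service order run_queue() enforces: gl_waiters1
 * (glmutex requests) first, then gl_waiters2 (demote and greedy
 * requests, unless GLF_SKIP_WAITERS2 is set), then gl_waiters3
 * (promote requests).  Processing stops as soon as a request leaves
 * the queue blocked.
 */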

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	else
		complete(&gh.gh_wait);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		acquired = 0;
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}
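
/*
 * The glmutex pattern used throughout this file (sketch; compare
 * gfs2_try_toss_inode() below):
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... gl->gl_object may be examined or modified here ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */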

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
					 GFP_KERNEL | __GFP_NOFAIL);
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/*  Deal with each possible exit condition  */

	if (!gh)
		gl->gl_stamp = jiffies;

	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);

	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED)
			gh->gh_error = 0;
		else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
			       lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh &&
		    !(gl->gl_req_gh &&
		      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
						   gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	return error;
}
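
/*
 * Typical synchronous use, via the gfs2_glock_nq_init() wrapper (see
 * gfs2_glock_nq_num() below); the caller context and error handling
 * are illustrative only:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... the glock is held here ...
 *	gfs2_glock_dq_uninit(&gh);
 */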

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}
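
/*
 * Asynchronous use (sketch; illustrative only): queue the request with
 * GL_ASYNC, do other work, then poll and wait for the result.  With
 * GL_ASYNC, gfs2_glock_nq() itself never returns an error:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	... do something useful while the lock is acquired ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */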

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_SYNC)
		set_bit(GLF_SYNC, &gl->gl_flags);

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		if (test_bit(GLF_SYNC, &gl->gl_flags)) {
			if (glops->go_sync)
				glops->go_sync(gl, DIO_METADATA | DIO_DATA);
		}

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) ||
	    !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) ||
	    !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}

static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}

/**
 * gfs2_glock_be_greedy - defer demote requests on a glock for a while
 * @gl: the glock
 * @time: how long to be greedy for, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time ||
	    gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}
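
/*
 * The greedy mechanism in brief: GLF_SKIP_WAITERS2 makes run_queue()
 * ignore queued demote requests, so the glock stays in its current
 * state; after @time jiffies, greedy_work() clears the flag, calls
 * the type-specific go_greedy() hook if there is one, and re-runs the
 * queue so any deferred demotes can proceed.
 */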

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
		      struct gfs2_glock_operations *glops, unsigned int state,
		      int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}
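
/*
 * Sorting holders by ascending lock number (and, on ties, EXCLUSIVE
 * before SHARED and GL_LOCAL_EXCL before non-exclusive) gives every
 * caller the same global acquisition order, which is what makes
 * nq_m_sync() below deadlock-free.
 */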

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array, filled with pointers into @ghs in sorted order
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	/* Pointer-sized elements: the same array doubles as the scratch
	   array of holder pointers handed to nq_m_sync() below. */
	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
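
/*
 * Example (sketch; gl_a and gl_b are illustrative): acquiring two
 * glocks together, letting gfs2_glock_nq_m() handle the ordering:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */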

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
			     struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}
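
/*
 * Each successful gfs2_lvb_hold() must be balanced by a
 * gfs2_lvb_unhold(); the lock module's LVB and the extra glock
 * reference are released only when gl_lvb_count drops to zero.
 */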

#if 0
void gfs2_lvb_sync(struct gfs2_glock *gl)
{
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
	if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
		gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

	gfs2_glmutex_unlock(gl);
}
#endif /* 0 */

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 * @sdp: the filesystem
 * @inum: the inode number
 *
 */

void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
{
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;

	error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
			       NO_CREATE, &gl);
	if (error || !gl)
		return;

	if (!gfs2_glmutex_trylock(gl))
		goto out;

	ip = gl->gl_object;
	if (!ip)
		goto out_unlock;

	if (atomic_read(&ip->i_count))
		goto out_unlock;

	gfs2_inode_destroy(ip, 1);

out_unlock:
	gfs2_glmutex_unlock(gl);
out:
	gfs2_glock_put(gl);
}
  1472. /**
  1473. * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
  1474. * iopen glock from memory
  1475. * @io_gl: the iopen glock
  1476. * @state: the state into which the glock should be put
  1477. *
  1478. */
void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
	struct gfs2_glock *i_gl;

	if (state != LM_ST_UNLOCKED)
		return;
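
	/* Take a reference on the inode glock under gl_spin so it can't
	   disappear out from under us before we try the glmutex. */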
	spin_lock(&io_gl->gl_spin);
	i_gl = io_gl->gl_object;
	if (i_gl) {
		gfs2_glock_hold(i_gl);
		spin_unlock(&io_gl->gl_spin);
	} else {
		spin_unlock(&io_gl->gl_spin);
		return;
	}

	if (gfs2_glmutex_trylock(i_gl)) {
		struct gfs2_inode *ip = i_gl->gl_object;
		if (ip) {
			gfs2_try_toss_vnode(ip);
			gfs2_glmutex_unlock(i_gl);
			gfs2_glock_schedule_for_reclaim(i_gl);
			goto out;
		}
		gfs2_glmutex_unlock(i_gl);
	}

out:
	gfs2_glock_put(i_gl);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;
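
	/* Sticky glocks are never demoted.  A prefetched glock that was
	   never used may be demoted once gt_prefetch_secs have passed
	   since its gl_stamp was set.  Otherwise the decision is left to
	   the per-type go_demote_ok() method, if there is one. */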
	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies,
				       gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
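	/* The reclaim list owns a reference on each glock it holds; take
	   it here, to be dropped when the glock comes off the list. */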
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */
void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);
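
	/* If nobody else is using the glock right now, destroy any
	   unused inode hanging off it, then queue a demotion if the
	   lock is held, idle, and demote_ok() agrees. */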
	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip, 1);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */
static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  struct gfs2_gl_hash_bucket *bucket)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
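	/* The plug marks our position in the bucket so that hb_lock can
	   be dropped while the examiner runs: the plug is advanced past
	   each glock as that glock is handed to the examiner, and plugs
	   left in the list by concurrent walkers are recognized by
	   GLF_PLUG and skipped. */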
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(&bucket->hb_lock);
	list_add(&plug.gl_list, &bucket->hb_list);
	write_unlock(&bucket->hb_lock);

	for (;;) {
		write_lock(&bucket->hb_lock);

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &bucket->hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&bucket->hb_list);
				write_unlock(&bucket->hb_lock);
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(&bucket->hb_lock);

		examiner(gl);
	}
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */
static void scan_glock(struct gfs2_glock *gl)
{
	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				goto out_schedule;
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);

	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */
void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
		cond_resched();
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */
static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;
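
	/* If the glock is still queued for reclaim, pull it off the list
	   and drop the list's reference.  The assert checks that this
	   was not the last reference, since we're still using the glock. */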
	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip, 1);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;
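
	/* Sweep every hash bucket repeatedly until they all come up
	   empty (or sweep just once if not waiting).  If the unmount
	   looks stuck, periodically dump the lock state to the console
	   to help debug the stall. */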
	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp,
					   &sdp->sd_gl_hash[x]))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		/* invalidate_inodes() requires that the sb inodes list
		   not change, but an async completion callback for an
		   unlock can occur which does glock_put() which
		   can call iput() which will change the sb inodes list.
		   invalidate_inodes_mutex prevents glock_put()'s during
		   an invalidate_inodes() */

		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
		invalidate_inodes(sdp->sd_vfs);
		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
		msleep(10);
	}
}

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	/* cast: u64 is not unsigned long long on every architecture */
	printk(KERN_INFO "    num = %llu %llu\n",
	       (unsigned long long)ip->i_num.no_formal_ino,
	       (unsigned long long)ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

	error = 0;

	return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;
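
	/* Hold gl_spin for the whole dump so the printed fields form a
	   consistent snapshot of the glock's state. */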
	spin_lock(&gl->gl_spin);

	/* cast: u64 is not unsigned long long on every architecture */
	printk(KERN_INFO "Glock (%u, %llu)\n",
	       gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO "  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO "  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO "  aspace = %lu\n",
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO "  aspace = no\n");
	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO "  Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock in the hash table to the console.
 *
 */
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_gl_hash_bucket *bucket;
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;
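
	/* Walk every hash bucket under its read lock, skipping any
	   iteration plugs left in the lists by examine_bucket(). */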
	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		bucket = &sdp->sd_gl_hash[x];

		read_lock(&bucket->hb_lock);

		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(&bucket->hb_lock);

		if (error)
			break;
	}

	return error;
}