xfs_buf.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

static kmem_zone_t *xfs_buf_zone;
static kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
#ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
	xfs_buf_t *bp,
	char *id,
	void *data,
	void *ra)
{
	ktrace_enter(xfs_buf_trace_buf,
		bp, id,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count.counter,
		(void *)current,
		data, ra,
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *xfs_buf_trace_buf;
#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define XB_TRACE(bp, id, data)	do { } while (0)
#endif
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
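
/*
 * Editorial note on the mappings above: read-ahead allocations are allowed
 * to fail quickly (__GFP_NORETRY) rather than stall, callers that must not
 * recurse back into the filesystem use GFP_NOFS/KM_NOFS, and everything
 * else may block normally; allocation-failure warnings are suppressed in
 * all cases via __GFP_NOWARN.
 */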
#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

/*
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
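
/*
 * Worked example (illustrative, assuming 4K pages and 64-bit longs):
 * each region is 4096 / 64 = 64 bytes, so a 512-byte block brought
 * uptodate at page offset 1024 spans regions 16..23 and sets bits
 * 16..23 of page->private.  Once every bit is set, the whole page is
 * marked uptodate.
 */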
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))

STATIC unsigned long
page_region_mask(
	size_t offset,
	size_t length)
{
	unsigned long mask;
	int first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first + 1);
	mask >>= BITS_PER_LONG - (final + 1);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	return mask;
}

STATIC_INLINE void
set_page_region(
	struct page *page,
	size_t offset,
	size_t length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}

STATIC_INLINE int
test_page_region(
	struct page *page,
	size_t offset,
	size_t length)
{
	unsigned long mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
}
/*
 * Mapping of multi-page buffers into contiguous virtual space
 */
typedef struct a_list {
	void *vm_addr;
	struct a_list *next;
} a_list_t;

static a_list_t *as_free_head;
static int as_list_len;
static DEFINE_SPINLOCK(as_lock);

/*
 * Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void *addr)
{
	a_list_t *aentry;

	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}

STATIC void
purge_addresses(void)
{
	a_list_t *aentry, *old;

	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}
/*
 * Internal xfs_buf_t object manipulation
 */
STATIC void
_xfs_buf_initialize(
	xfs_buf_t *bp,
	xfs_buftarg_t *target,
	xfs_off_t range_base,
	size_t range_length,
	xfs_buf_flags_t flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_MUTEX_LOCKED(&bp->b_iodonesema);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t *bp,
	int page_count,
	xfs_buf_flags_t flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}
/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t *bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages,
			  bp->b_page_count * sizeof(struct page *));
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t *bp)
{
	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
		uint i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page *page = bp->b_pages[i];

			if (bp->b_flags & _XBF_PAGE_CACHE)
				ASSERT(!PagePrivate(page));
			page_cache_release(page);
		}
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
}
/*
 * Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t *bp,
	uint flags)
{
	struct address_space *mapping = bp->b_target->bt_mapping;
	size_t blocksize = bp->b_target->bt_bsize;
	size_t size = bp->b_count_desired;
	size_t nbytes, offset;
	gfp_t gfp_mask = xb_to_gfp(flags);
	unsigned short page_count, i;
	pgoff_t first;
	xfs_off_t end;
	int error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page *page;
		uint retries = 0;

	retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			congestion_wait(WRITE, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		ASSERT(!PagePrivate(page));
		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_locked = 1;
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;
	}

	if (!bp->b_locked) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}
/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t *bp,
	uint flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		if (as_list_len > 64)
			purge_addresses();
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}
/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t *btp,	/* block device target */
	xfs_off_t ioff,		/* starting offset of range */
	size_t isize,		/* length of range */
	xfs_buf_flags_t flags,
	xfs_buf_t *new_bp)
{
	xfs_off_t range_base;
	size_t range_length;
	xfs_bufhash_t *hash;
	xfs_buf_t *bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
					range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
	}
	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}
/*
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(
	xfs_buftarg_t *target,	/* target for buffer */
	xfs_off_t ioff,		/* starting offset of range */
	size_t isize,		/* length of range */
	xfs_buf_flags_t flags)
{
	xfs_buf_t *bp, *new_bp;
	int error = 0, i;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__FUNCTION__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
xfs_buf_t *
xfs_buf_read_flags(
	xfs_buftarg_t *target,
	xfs_off_t ioff,
	size_t isize,
	xfs_buf_flags_t flags)
{
	xfs_buf_t *bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_flags(target, ioff, isize, flags);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp)) {
			XB_TRACE(bp, "read", (unsigned long)flags);
			XFS_STATS_INC(xb_get_read);
			xfs_buf_iostart(bp, flags);
		} else if (flags & XBF_ASYNC) {
			XB_TRACE(bp, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			XB_TRACE(bp, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
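
/*
 * Illustrative use of the lookup/read interfaces (editorial sketch, not
 * part of this file; the target pointer and block numbers are
 * hypothetical caller-side values):
 *
 *	bp = xfs_buf_read_flags(target, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		... access the data via XFS_BUF_PTR(bp) or xfs_buf_offset() ...
 *		xfs_buf_relse(bp);	unlock and drop the reference
 *	}
 */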
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t *target,
	xfs_off_t ioff,
	size_t isize,
	xfs_buf_flags_t flags)
{
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t len,
	xfs_buftarg_t *target)
{
	xfs_buf_t *bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

static inline struct page *
mem_to_page(
	void *addr)
{
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}
int
xfs_buf_associate_memory(
	xfs_buf_t *bp,
	void *mem,
	size_t len)
{
	int rval;
	int i = 0;
	size_t ptr;
	size_t end, end_cur;
	off_t offset;
	int page_count;

	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))
		page_count++;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);
	end_cur = end;
	/* set up first page */
	bp->b_pages[0] = mem_to_page(mem);

	ptr += PAGE_CACHE_SIZE;
	bp->b_page_count = ++i;
	while (ptr < end) {
		bp->b_pages[i] = mem_to_page((void *)ptr);
		bp->b_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;
	}
	bp->b_locked = 0;

	bp->b_count_desired = bp->b_buffer_length = len;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}
xfs_buf_t *
xfs_buf_get_noaddr(
	size_t len,
	xfs_buftarg_t *target)
{
	unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int error, i;
	xfs_buf_t *bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(GFP_KERNEL);
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		printk(KERN_WARNING "%s: failed to map pages\n",
				__FUNCTION__);
		goto fail_free_mem;
	}

	xfs_buf_unlock(bp);

	XB_TRACE(bp, "no_daddr", len);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
 fail_free_buf:
	xfs_buf_free(bp);
 fail:
	return NULL;
}
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t *bp)
{
	atomic_inc(&bp->b_hold);
	XB_TRACE(bp, "hold", 0);
}

/*
 * Releases a hold on the specified buffer.  If the
 * hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t *bp)
{
	xfs_bufhash_t *hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	} else {
		/*
		 * Catch reference count leaks
		 */
		ASSERT(atomic_read(&bp->b_hold) >= 0);
	}
}
/*
 * Mutual exclusion on buffers.  Locking model:
 *
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.
 */

/*
 * Locks a buffer object, if it is not already locked.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t *bp)
{
	int locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
	}
	XB_TRACE(bp, "cond_lock", (long)locked);
	return locked ? 0 : -EBUSY;
}

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
	xfs_buf_t *bp)
{
	return atomic_read(&bp->b_sema.count);
}
#endif

/*
 * Locks a buffer object.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t *bp)
{
	XB_TRACE(bp, "lock", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	XB_TRACE(bp, "locked", 0);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t *bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
	XB_TRACE(bp, "unlock", 0);
}
/*
 * Pinning Buffer Storage in Memory
 * Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
	xfs_buf_t *bp)
{
	atomic_inc(&bp->b_pin_count);
	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}

void
xfs_buf_unpin(
	xfs_buf_t *bp)
{
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}

int
xfs_buf_ispin(
	xfs_buf_t *bp)
{
	return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t *bp)
{
	DECLARE_WAITQUEUE (wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}
/*
 * Buffer Utility Routines
 */
STATIC void
xfs_buf_iodone_work(
	struct work_struct *work)
{
	xfs_buf_t *bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t *bp,
	int schedule)
{
	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "iodone", bp->b_iodone);

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		up(&bp->b_iodonesema);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t *bp,
	int error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	XB_TRACE(bp, "ioerror", (unsigned long)error);
}

/*
 * Initiate I/O on a buffer, based on the flags supplied.
 * The b_iodone routine in the buffer supplied will only be called
 * when all of the subsidiary I/O requests, if any, have been completed.
 */
int
xfs_buf_iostart(
	xfs_buf_t *bp,
	xfs_buf_flags_t flags)
{
	int status = 0;

	XB_TRACE(bp, "iostart", (unsigned long)flags);

	if (flags & XBF_DELWRI) {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
		xfs_buf_delwri_queue(bp, 1);
		return status;
	}

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	 */
	status = (flags & XBF_WRITE) ?
		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	 */
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);

	return status;
}
STATIC_INLINE int
_xfs_buf_iolocked(
	xfs_buf_t *bp)
{
	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
	if (bp->b_flags & XBF_READ)
		return bp->b_locked;
	return 0;
}

STATIC_INLINE void
_xfs_buf_ioend(
	xfs_buf_t *bp,
	int schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_locked = 0;
		xfs_buf_ioend(bp, schedule);
	}
}

STATIC int
xfs_buf_bio_end_io(
	struct bio *bio,
	unsigned int bytes_done,
	int error)
{
	xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
	unsigned int blocksize = bp->b_target->bt_bsize;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		bp->b_error = EIO;

	do {
		struct page *page = bvec->bv_page;

		ASSERT(!PagePrivate(page));
		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_xfs_buf_iolocked(bp)) {
			unlock_page(page);
		}
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
	return 0;
}
STATIC void
_xfs_buf_ioapply(
	xfs_buf_t *bp)
{
	int i, rw, map_i, total_nr_pages, nr_pages;
	struct bio *bio;
	int offset = bp->b_offset;
	int size = bp->b_count_desired;
	sector_t sector = bp->b_bn;
	unsigned int blocksize = bp->b_target->bt_bsize;
	int locking = _xfs_buf_iolocked(bp);

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_BARRIER;
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	}

	/* Special code path for reading a sub page size buffer in --
	 * we populate the whole page, and hence the other metadata
	 * in the same page.  This optimization is only valid when the
	 * filesystem block size is not smaller than the page size.
	 */
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    (bp->b_flags & XBF_READ) && locking &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bp->b_target->bt_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;

		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
		size = 0;

		atomic_inc(&bp->b_io_remaining);

		goto submit_io;
	}

	/* Lock down the pages which we need to for the request */
	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
		for (i = 0; size; i++) {
			int nbytes = PAGE_CACHE_SIZE - offset;
			struct page *page = bp->b_pages[i];

			if (nbytes > size)
				nbytes = size;

			lock_page(page);

			size -= nbytes;
			offset = 0;
		}
		offset = bp->b_offset;
		size = bp->b_count_desired;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int rbytes, nbytes = PAGE_CACHE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

submit_io:
	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		bio_put(bio);
		xfs_buf_ioerror(bp, EIO);
	}
}
int
xfs_buf_iorequest(
	xfs_buf_t *bp)
{
	XB_TRACE(bp, "iorequest", 0);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t *bp)
{
	XB_TRACE(bp, "iowait", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_iodonesema);
	XB_TRACE(bp, "iowaited", (long)bp->b_error);
	return bp->b_error;
}
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t *bp,
	size_t offset)
{
	struct page *page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t *bp,		/* buffer to process */
	size_t boff,		/* starting buffer offset */
	size_t bsize,		/* length to copy */
	caddr_t data,		/* data address */
	xfs_buf_rw_t mode)	/* read/write/zero flag */
{
	size_t bend, cpoff, csize;
	struct page *page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
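
/*
 * Illustrative call (editorial, hypothetical caller-side snippet): copy
 * 512 bytes from a local buffer into the start of bp, page by page,
 * regardless of whether bp is virtually mapped:
 *
 *	xfs_buf_iomove(bp, 0, 512, (caddr_t)local_buf, XBRW_WRITE);
 */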
/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
	xfs_buftarg_t *btp)
{
	xfs_buf_t *bp, *n;
	xfs_bufhash_t *hash;
	uint i;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];
again:
		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				/*
				 * Catch superblock reference count leaks
				 * immediately
				 */
				BUG_ON(bp->b_bn == 0);
				delay(100);
				goto again;
			}
		}
		spin_unlock(&hash->bh_lock);
	}
}

/*
 * Allocate buffer hash table for a given target.
 * For devices containing metadata (i.e. not the log/realtime devices)
 * we need to allocate a much larger hash table.
 */
STATIC void
xfs_alloc_bufhash(
	xfs_buftarg_t *btp,
	int external)
{
	unsigned int i;

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
			sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
	}
}

STATIC void
xfs_free_bufhash(
	xfs_buftarg_t *btp)
{
	kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
	btp->bt_hash = NULL;
}
/*
 * buftarg list for delwrite queue processing
 */
static LIST_HEAD(xfs_buftarg_list);
static DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
	xfs_buftarg_t *btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
	xfs_buftarg_t *btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);
}

void
xfs_free_buftarg(
	xfs_buftarg_t *btp,
	int external)
{
	xfs_flush_buftarg(btp, 1);
	if (external)
		xfs_blkdev_put(btp->bt_bdev);
	xfs_free_bufhash(btp);
	iput(btp->bt_mapping->host);

	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	 */
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

	kmem_free(btp, sizeof(*btp));
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t *btp,
	unsigned int blocksize,
	unsigned int sectorsize,
	int verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		printk(KERN_WARNING
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	if (verbose &&
	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
		printk(KERN_WARNING
			"XFS: %u byte sectors in use on device %s. "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
	}

	return 0;
}
/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t *btp,
	struct block_device *bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t *btp,
	unsigned int blocksize,
	unsigned int sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_mapping_buftarg(
	xfs_buftarg_t *btp,
	struct block_device *bdev)
{
	struct backing_dev_info *bdi;
	struct inode *inode;
	struct address_space *mapping;
	static const struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,
		.migratepage = fail_migrate_page,
	};

	inode = new_inode(bdev->bd_inode->i_sb);
	if (!inode) {
		printk(KERN_WARNING
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
		return ENOMEM;
	}
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
	if (!bdi)
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->bt_mapping = mapping;
	return 0;
}
STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t *btp)
{
	int error = 0;

	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
		goto out_error;
	}
	xfs_register_buftarg(btp);
out_error:
	return error;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct block_device *bdev,
	int external)
{
	xfs_buftarg_t *btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_mapping_buftarg(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp))
		goto error;
	xfs_alloc_bufhash(btp, external);
	return btp;

error:
	kmem_free(btp, sizeof(*btp));
	return NULL;
}
/*
 * Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t *bp,
	int unlock)
{
	struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;

	XB_TRACE(bp, "delwri_q", (long)unlock);
	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t *bp)
{
	spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
	int dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	XB_TRACE(bp, "delwri_dq", (long)dequeued);
}
STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct *queue)
{
	flush_workqueue(queue);
}

STATIC int
xfsbufd_wakeup(
	int priority,
	gfp_t mask)
{
	xfs_buftarg_t *btp;

	spin_lock(&xfs_buftarg_lock);
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
			continue;
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
		wake_up_process(btp->bt_task);
	}
	spin_unlock(&xfs_buftarg_lock);
	return 0;
}
/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t *target,
	struct list_head *list,
	unsigned long age)
{
	xfs_buf_t *bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t *dwlk = &target->bt_delwrite_lock;
	int skipped = 0;
	int force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;
}
STATIC int
xfsbufd(
	void *data)
{
	struct list_head tmp;
	xfs_buftarg_t *target = (xfs_buftarg_t *)data;
	int count;
	xfs_buf_t *bp;

	current->flags |= PF_MEMALLOC;

	do {
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		xfs_buf_delwri_split(target, &tmp,
				xfs_buf_age_centisecs * msecs_to_jiffies(10));

		count = 0;
		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);
			count++;
		}

		if (as_list_len > 0)
			purge_addresses();
		if (count)
			blk_run_address_space(target->bt_mapping);

	} while (!kthread_should_stop());

	return 0;
}
/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device.  This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t *target,
	int wait)
{
	struct list_head tmp;
	xfs_buf_t *bp, *n;
	int pincount = 0;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		ASSERT(target == bp->b_target);
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else
			list_del_init(&bp->b_list);

		xfs_buf_iostrategy(bp);
	}

	if (wait)
		blk_run_address_space(target->bt_mapping);

	/*
	 * Remaining list items must be flushed before returning
	 */
	while (!list_empty(&tmp)) {
		bp = list_entry(tmp.next, xfs_buf_t, b_list);

		list_del_init(&bp->b_list);
		xfs_iowait(bp);
		xfs_buf_relse(bp);
	}

	return pincount;
}
int __init
xfs_buf_init(void)
{
#ifdef XFS_BUF_TRACE
	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif

	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out_free_trace_buf;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
	if (!xfs_buf_shake)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out_free_trace_buf:
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	kmem_shake_deregister(xfs_buf_shake);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
}

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif