xfs_buf.c

  1. /*
  2. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include <linux/stddef.h>
  20. #include <linux/errno.h>
  21. #include <linux/slab.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/init.h>
  24. #include <linux/vmalloc.h>
  25. #include <linux/bio.h>
  26. #include <linux/sysctl.h>
  27. #include <linux/proc_fs.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/percpu.h>
  30. #include <linux/blkdev.h>
  31. #include <linux/hash.h>
  32. #include <linux/kthread.h>
  33. #include <linux/migrate.h>
  34. #include <linux/backing-dev.h>
  35. #include <linux/freezer.h>
  36. static kmem_zone_t *xfs_buf_zone;
  37. STATIC int xfsbufd(void *);
  38. STATIC int xfsbufd_wakeup(int, gfp_t);
  39. STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
  40. static struct shrinker xfs_buf_shake = {
  41. .shrink = xfsbufd_wakeup,
  42. .seeks = DEFAULT_SEEKS,
  43. };
  44. static struct workqueue_struct *xfslogd_workqueue;
  45. struct workqueue_struct *xfsdatad_workqueue;
  46. #ifdef XFS_BUF_TRACE
  47. void
  48. xfs_buf_trace(
  49. xfs_buf_t *bp,
  50. char *id,
  51. void *data,
  52. void *ra)
  53. {
  54. ktrace_enter(xfs_buf_trace_buf,
  55. bp, id,
  56. (void *)(unsigned long)bp->b_flags,
  57. (void *)(unsigned long)bp->b_hold.counter,
  58. (void *)(unsigned long)bp->b_sema.count.counter,
  59. (void *)current,
  60. data, ra,
  61. (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
  62. (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
  63. (void *)(unsigned long)bp->b_buffer_length,
  64. NULL, NULL, NULL, NULL, NULL);
  65. }
  66. ktrace_t *xfs_buf_trace_buf;
  67. #define XFS_BUF_TRACE_SIZE 4096
  68. #define XB_TRACE(bp, id, data) \
  69. xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
  70. #else
  71. #define XB_TRACE(bp, id, data) do { } while (0)
  72. #endif
  73. #ifdef XFS_BUF_LOCK_TRACKING
  74. # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
  75. # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
  76. # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
  77. #else
  78. # define XB_SET_OWNER(bp) do { } while (0)
  79. # define XB_CLEAR_OWNER(bp) do { } while (0)
  80. # define XB_GET_OWNER(bp) do { } while (0)
  81. #endif
  82. #define xb_to_gfp(flags) \
  83. ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
  84. ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
  85. #define xb_to_km(flags) \
  86. (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
  87. #define xfs_buf_allocate(flags) \
  88. kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
  89. #define xfs_buf_deallocate(bp) \
  90. kmem_zone_free(xfs_buf_zone, (bp));
  91. /*
  92. * Page Region interfaces.
  93. *
  94. * For pages in filesystems where the blocksize is smaller than the
  95. * pagesize, we use the page->private field (long) to hold a bitmap
  96. * of uptodate regions within the page.
  97. *
  98. * Each such region is "bytes per page / bits per long" bytes long.
  99. *
  100. * NBPPR == number-of-bytes-per-page-region
  101. * BTOPR == bytes-to-page-region (rounded up)
  102. * BTOPRT == bytes-to-page-region-truncated (rounded down)
  103. */
  104. #if (BITS_PER_LONG == 32)
  105. #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
  106. #elif (BITS_PER_LONG == 64)
  107. #define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
  108. #else
  109. #error BITS_PER_LONG must be 32 or 64
  110. #endif
  111. #define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
  112. #define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
  113. #define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
  114. STATIC unsigned long
  115. page_region_mask(
  116. size_t offset,
  117. size_t length)
  118. {
  119. unsigned long mask;
  120. int first, final;
  121. first = BTOPR(offset);
  122. final = BTOPRT(offset + length - 1);
  123. first = min(first, final);
  124. mask = ~0UL;
  125. mask <<= BITS_PER_LONG - (final - first);
  126. mask >>= BITS_PER_LONG - (final);
  127. ASSERT(offset + length <= PAGE_CACHE_SIZE);
  128. ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
  129. return mask;
  130. }
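/*
 * Editorial note (illustrative only, not part of the original source):
 * assuming 4096-byte pages and 64-bit longs, PRSHIFT is 12 - 6 = 6 and
 * NBPPR is 4096 / 64 = 64, so each page-region bit covers 64 bytes.
 * For example, BTOPR(100) = (100 + 63) >> 6 = 2 regions rounded up,
 * while BTOPRT(100) = 100 >> 6 = 1 region rounded down; a 512-byte
 * block starting at offset 512 spans regions 8 through 15
 * (BTOPR(512) = 8, BTOPRT(512 + 512 - 1) = 15).
 */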
  131. STATIC_INLINE void
  132. set_page_region(
  133. struct page *page,
  134. size_t offset,
  135. size_t length)
  136. {
  137. set_page_private(page,
  138. page_private(page) | page_region_mask(offset, length));
  139. if (page_private(page) == ~0UL)
  140. SetPageUptodate(page);
  141. }
  142. STATIC_INLINE int
  143. test_page_region(
  144. struct page *page,
  145. size_t offset,
  146. size_t length)
  147. {
  148. unsigned long mask = page_region_mask(offset, length);
  149. return (mask && (page_private(page) & mask) == mask);
  150. }
  151. /*
  152. * Mapping of multi-page buffers into contiguous virtual space
  153. */
  154. typedef struct a_list {
  155. void *vm_addr;
  156. struct a_list *next;
  157. } a_list_t;
  158. static a_list_t *as_free_head;
  159. static int as_list_len;
  160. static DEFINE_SPINLOCK(as_lock);
  161. /*
  162. * Try to batch vunmaps because they are costly.
  163. */
  164. STATIC void
  165. free_address(
  166. void *addr)
  167. {
  168. a_list_t *aentry;
  169. #ifdef CONFIG_XEN
  170. /*
  171. * Xen needs to be able to make sure it can get an exclusive
  172. * RO mapping of pages it wants to turn into a pagetable. If
  173. * a newly allocated page is also still being vmap()ed by xfs,
  174. * it will cause pagetable construction to fail. This is a
  175. * quick workaround to always eagerly unmap pages so that Xen
  176. * is happy.
  177. */
  178. vunmap(addr);
  179. return;
  180. #endif
  181. aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
  182. if (likely(aentry)) {
  183. spin_lock(&as_lock);
  184. aentry->next = as_free_head;
  185. aentry->vm_addr = addr;
  186. as_free_head = aentry;
  187. as_list_len++;
  188. spin_unlock(&as_lock);
  189. } else {
  190. vunmap(addr);
  191. }
  192. }
  193. STATIC void
  194. purge_addresses(void)
  195. {
  196. a_list_t *aentry, *old;
  197. if (as_free_head == NULL)
  198. return;
  199. spin_lock(&as_lock);
  200. aentry = as_free_head;
  201. as_free_head = NULL;
  202. as_list_len = 0;
  203. spin_unlock(&as_lock);
  204. while ((old = aentry) != NULL) {
  205. vunmap(aentry->vm_addr);
  206. aentry = aentry->next;
  207. kfree(old);
  208. }
  209. }
  210. /*
  211. * Internal xfs_buf_t object manipulation
  212. */
  213. STATIC void
  214. _xfs_buf_initialize(
  215. xfs_buf_t *bp,
  216. xfs_buftarg_t *target,
  217. xfs_off_t range_base,
  218. size_t range_length,
  219. xfs_buf_flags_t flags)
  220. {
  221. /*
  222. * We don't want certain flags to appear in b_flags.
  223. */
  224. flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
  225. memset(bp, 0, sizeof(xfs_buf_t));
  226. atomic_set(&bp->b_hold, 1);
  227. init_MUTEX_LOCKED(&bp->b_iodonesema);
  228. INIT_LIST_HEAD(&bp->b_list);
  229. INIT_LIST_HEAD(&bp->b_hash_list);
  230. init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
  231. XB_SET_OWNER(bp);
  232. bp->b_target = target;
  233. bp->b_file_offset = range_base;
  234. /*
  235. * Set buffer_length and count_desired to the same value initially.
  236. * I/O routines should use count_desired, which will be the same in
  237. * most cases but may be reset (e.g. XFS recovery).
  238. */
  239. bp->b_buffer_length = bp->b_count_desired = range_length;
  240. bp->b_flags = flags;
  241. bp->b_bn = XFS_BUF_DADDR_NULL;
  242. atomic_set(&bp->b_pin_count, 0);
  243. init_waitqueue_head(&bp->b_waiters);
  244. XFS_STATS_INC(xb_create);
  245. XB_TRACE(bp, "initialize", target);
  246. }
  247. /*
  248. * Allocate a page array capable of holding a specified number
  249. * of pages, and point the page buf at it.
  250. */
  251. STATIC int
  252. _xfs_buf_get_pages(
  253. xfs_buf_t *bp,
  254. int page_count,
  255. xfs_buf_flags_t flags)
  256. {
  257. /* Make sure that we have a page list */
  258. if (bp->b_pages == NULL) {
  259. bp->b_offset = xfs_buf_poff(bp->b_file_offset);
  260. bp->b_page_count = page_count;
  261. if (page_count <= XB_PAGES) {
  262. bp->b_pages = bp->b_page_array;
  263. } else {
  264. bp->b_pages = kmem_alloc(sizeof(struct page *) *
  265. page_count, xb_to_km(flags));
  266. if (bp->b_pages == NULL)
  267. return -ENOMEM;
  268. }
  269. memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
  270. }
  271. return 0;
  272. }
  273. /*
  274. * Frees b_pages if it was allocated.
  275. */
  276. STATIC void
  277. _xfs_buf_free_pages(
  278. xfs_buf_t *bp)
  279. {
  280. if (bp->b_pages != bp->b_page_array) {
  281. kmem_free(bp->b_pages,
  282. bp->b_page_count * sizeof(struct page *));
  283. }
  284. }
  285. /*
  286. * Releases the specified buffer.
  287. *
  288. * The modification state of any associated pages is left unchanged.
  289. * The buffer must not be on any hash - use xfs_buf_rele instead for
  290. * hashed and refcounted buffers
  291. */
  292. void
  293. xfs_buf_free(
  294. xfs_buf_t *bp)
  295. {
  296. XB_TRACE(bp, "free", 0);
  297. ASSERT(list_empty(&bp->b_hash_list));
  298. if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
  299. uint i;
  300. if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
  301. free_address(bp->b_addr - bp->b_offset);
  302. for (i = 0; i < bp->b_page_count; i++) {
  303. struct page *page = bp->b_pages[i];
  304. if (bp->b_flags & _XBF_PAGE_CACHE)
  305. ASSERT(!PagePrivate(page));
  306. page_cache_release(page);
  307. }
  308. _xfs_buf_free_pages(bp);
  309. }
  310. xfs_buf_deallocate(bp);
  311. }
  312. /*
  313. * Finds all pages for buffer in question and builds its page list.
  314. */
  315. STATIC int
  316. _xfs_buf_lookup_pages(
  317. xfs_buf_t *bp,
  318. uint flags)
  319. {
  320. struct address_space *mapping = bp->b_target->bt_mapping;
  321. size_t blocksize = bp->b_target->bt_bsize;
  322. size_t size = bp->b_count_desired;
  323. size_t nbytes, offset;
  324. gfp_t gfp_mask = xb_to_gfp(flags);
  325. unsigned short page_count, i;
  326. pgoff_t first;
  327. xfs_off_t end;
  328. int error;
  329. end = bp->b_file_offset + bp->b_buffer_length;
  330. page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
  331. error = _xfs_buf_get_pages(bp, page_count, flags);
  332. if (unlikely(error))
  333. return error;
  334. bp->b_flags |= _XBF_PAGE_CACHE;
  335. offset = bp->b_offset;
  336. first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
  337. for (i = 0; i < bp->b_page_count; i++) {
  338. struct page *page;
  339. uint retries = 0;
  340. retry:
  341. page = find_or_create_page(mapping, first + i, gfp_mask);
  342. if (unlikely(page == NULL)) {
  343. if (flags & XBF_READ_AHEAD) {
  344. bp->b_page_count = i;
  345. return -ENOMEM;
  346. }
  347. /*
  348. * This could deadlock.
  349. *
  350. * But until all the XFS lowlevel code is revamped to
  351. * handle buffer allocation failures we can't do much.
  352. */
  353. if (!(++retries % 100))
  354. printk(KERN_ERR
  355. "XFS: possible memory allocation "
  356. "deadlock in %s (mode:0x%x)\n",
  357. __FUNCTION__, gfp_mask);
  358. XFS_STATS_INC(xb_page_retries);
  359. xfsbufd_wakeup(0, gfp_mask);
  360. congestion_wait(WRITE, HZ/50);
  361. goto retry;
  362. }
  363. XFS_STATS_INC(xb_page_found);
  364. nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
  365. size -= nbytes;
  366. ASSERT(!PagePrivate(page));
  367. if (!PageUptodate(page)) {
  368. page_count--;
  369. if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
  370. if (test_page_region(page, offset, nbytes))
  371. page_count++;
  372. }
  373. }
  374. unlock_page(page);
  375. bp->b_pages[i] = page;
  376. offset = 0;
  377. }
  378. if (page_count == bp->b_page_count)
  379. bp->b_flags |= XBF_DONE;
  380. XB_TRACE(bp, "lookup_pages", (long)page_count);
  381. return error;
  382. }
  383. /*
  384. * Map buffer into kernel address-space if necessary.
  385. */
  386. STATIC int
  387. _xfs_buf_map_pages(
  388. xfs_buf_t *bp,
  389. uint flags)
  390. {
  391. /* A single page buffer is always mappable */
  392. if (bp->b_page_count == 1) {
  393. bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
  394. bp->b_flags |= XBF_MAPPED;
  395. } else if (flags & XBF_MAPPED) {
  396. if (as_list_len > 64)
  397. purge_addresses();
  398. bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
  399. VM_MAP, PAGE_KERNEL);
  400. if (unlikely(bp->b_addr == NULL))
  401. return -ENOMEM;
  402. bp->b_addr += bp->b_offset;
  403. bp->b_flags |= XBF_MAPPED;
  404. }
  405. return 0;
  406. }
  407. /*
  408. * Finding and Reading Buffers
  409. */
  410. /*
  411. * Looks up, and creates if absent, a lockable buffer for
  412. * a given range of an inode. The buffer is returned
  413. * locked. If other overlapping buffers exist, they are
  414. * released before the new buffer is created and locked,
  415. * which may imply that this call will block until those buffers
  416. * are unlocked. No I/O is implied by this call.
  417. */
  418. xfs_buf_t *
  419. _xfs_buf_find(
  420. xfs_buftarg_t *btp, /* block device target */
  421. xfs_off_t ioff, /* starting offset of range */
  422. size_t isize, /* length of range */
  423. xfs_buf_flags_t flags,
  424. xfs_buf_t *new_bp)
  425. {
  426. xfs_off_t range_base;
  427. size_t range_length;
  428. xfs_bufhash_t *hash;
  429. xfs_buf_t *bp, *n;
  430. range_base = (ioff << BBSHIFT);
  431. range_length = (isize << BBSHIFT);
  432. /* Check for IOs smaller than the sector size / not sector aligned */
  433. ASSERT(!(range_length < (1 << btp->bt_sshift)));
  434. ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
  435. hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
  436. spin_lock(&hash->bh_lock);
  437. list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
  438. ASSERT(btp == bp->b_target);
  439. if (bp->b_file_offset == range_base &&
  440. bp->b_buffer_length == range_length) {
  441. /*
  442. * If we look at something, bring it to the
  443. * front of the list for next time.
  444. */
  445. atomic_inc(&bp->b_hold);
  446. list_move(&bp->b_hash_list, &hash->bh_list);
  447. goto found;
  448. }
  449. }
  450. /* No match found */
  451. if (new_bp) {
  452. _xfs_buf_initialize(new_bp, btp, range_base,
  453. range_length, flags);
  454. new_bp->b_hash = hash;
  455. list_add(&new_bp->b_hash_list, &hash->bh_list);
  456. } else {
  457. XFS_STATS_INC(xb_miss_locked);
  458. }
  459. spin_unlock(&hash->bh_lock);
  460. return new_bp;
  461. found:
  462. spin_unlock(&hash->bh_lock);
  463. /* Attempt to get the semaphore without sleeping,
  464. * if this does not work then we need to drop the
  465. * spinlock and do a hard attempt on the semaphore.
  466. */
  467. if (down_trylock(&bp->b_sema)) {
  468. if (!(flags & XBF_TRYLOCK)) {
  469. /* wait for buffer ownership */
  470. XB_TRACE(bp, "get_lock", 0);
  471. xfs_buf_lock(bp);
  472. XFS_STATS_INC(xb_get_locked_waited);
  473. } else {
  474. /* We asked for a trylock and failed, no need
  475. * to look at file offset and length here, we
  476. * know that this buffer at least overlaps our
  477. * buffer and is locked, therefore our buffer
  478. * either does not exist, or is this buffer.
  479. */
  480. xfs_buf_rele(bp);
  481. XFS_STATS_INC(xb_busy_locked);
  482. return NULL;
  483. }
  484. } else {
  485. /* trylock worked */
  486. XB_SET_OWNER(bp);
  487. }
  488. if (bp->b_flags & XBF_STALE) {
  489. ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
  490. bp->b_flags &= XBF_MAPPED;
  491. }
  492. XB_TRACE(bp, "got_lock", 0);
  493. XFS_STATS_INC(xb_get_locked);
  494. return bp;
  495. }
  496. /*
  497. * Assembles a buffer covering the specified range.
  498. * Storage in memory for all portions of the buffer will be allocated,
  499. * although backing storage may not be.
  500. */
  501. xfs_buf_t *
  502. xfs_buf_get_flags(
  503. xfs_buftarg_t *target,/* target for buffer */
  504. xfs_off_t ioff, /* starting offset of range */
  505. size_t isize, /* length of range */
  506. xfs_buf_flags_t flags)
  507. {
  508. xfs_buf_t *bp, *new_bp;
  509. int error = 0, i;
  510. new_bp = xfs_buf_allocate(flags);
  511. if (unlikely(!new_bp))
  512. return NULL;
  513. bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
  514. if (bp == new_bp) {
  515. error = _xfs_buf_lookup_pages(bp, flags);
  516. if (error)
  517. goto no_buffer;
  518. } else {
  519. xfs_buf_deallocate(new_bp);
  520. if (unlikely(bp == NULL))
  521. return NULL;
  522. }
  523. for (i = 0; i < bp->b_page_count; i++)
  524. mark_page_accessed(bp->b_pages[i]);
  525. if (!(bp->b_flags & XBF_MAPPED)) {
  526. error = _xfs_buf_map_pages(bp, flags);
  527. if (unlikely(error)) {
  528. printk(KERN_WARNING "%s: failed to map pages\n",
  529. __FUNCTION__);
  530. goto no_buffer;
  531. }
  532. }
  533. XFS_STATS_INC(xb_get);
  534. /*
  535. * Always fill in the block number now, the mapped cases can do
  536. * their own overlay of this later.
  537. */
  538. bp->b_bn = ioff;
  539. bp->b_count_desired = bp->b_buffer_length;
  540. XB_TRACE(bp, "get", (unsigned long)flags);
  541. return bp;
  542. no_buffer:
  543. if (flags & (XBF_LOCK | XBF_TRYLOCK))
  544. xfs_buf_unlock(bp);
  545. xfs_buf_rele(bp);
  546. return NULL;
  547. }
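/*
 * Editorial sketch (not part of the original source): a hypothetical
 * caller of xfs_buf_get_flags(). "target", "blkno" and "numblks" are
 * placeholder names; offsets and lengths are in 512-byte basic blocks.
 *
 *	xfs_buf_t	*bp;
 *
 *	bp = xfs_buf_get_flags(target, blkno, numblks,
 *			       XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK);
 *	if (!bp)
 *		return ENOMEM;
 *	memset(XFS_BUF_PTR(bp), 0, BBTOB(numblks));
 *	xfs_buf_relse(bp);
 *
 * xfs_buf_relse() (from xfs_buf.h) unlocks the buffer and drops the
 * reference taken by the lookup.
 */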
  548. xfs_buf_t *
  549. xfs_buf_read_flags(
  550. xfs_buftarg_t *target,
  551. xfs_off_t ioff,
  552. size_t isize,
  553. xfs_buf_flags_t flags)
  554. {
  555. xfs_buf_t *bp;
  556. flags |= XBF_READ;
  557. bp = xfs_buf_get_flags(target, ioff, isize, flags);
  558. if (bp) {
  559. if (!XFS_BUF_ISDONE(bp)) {
  560. XB_TRACE(bp, "read", (unsigned long)flags);
  561. XFS_STATS_INC(xb_get_read);
  562. xfs_buf_iostart(bp, flags);
  563. } else if (flags & XBF_ASYNC) {
  564. XB_TRACE(bp, "read_async", (unsigned long)flags);
  565. /*
  566. * Read ahead call which is already satisfied,
  567. * drop the buffer
  568. */
  569. goto no_buffer;
  570. } else {
  571. XB_TRACE(bp, "read_done", (unsigned long)flags);
  572. /* We do not want read in the flags */
  573. bp->b_flags &= ~XBF_READ;
  574. }
  575. }
  576. return bp;
  577. no_buffer:
  578. if (flags & (XBF_LOCK | XBF_TRYLOCK))
  579. xfs_buf_unlock(bp);
  580. xfs_buf_rele(bp);
  581. return NULL;
  582. }
  583. /*
  584. * If we are not low on memory then do the readahead in a deadlock
  585. * safe manner.
  586. */
  587. void
  588. xfs_buf_readahead(
  589. xfs_buftarg_t *target,
  590. xfs_off_t ioff,
  591. size_t isize,
  592. xfs_buf_flags_t flags)
  593. {
  594. struct backing_dev_info *bdi;
  595. bdi = target->bt_mapping->backing_dev_info;
  596. if (bdi_read_congested(bdi))
  597. return;
  598. flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
  599. xfs_buf_read_flags(target, ioff, isize, flags);
  600. }
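/*
 * Editorial sketch (not part of the original source): issuing a read with
 * readahead of the following range. Names are placeholders; the error is
 * read back with XFS_BUF_GETERROR() from xfs_buf.h.
 *
 *	xfs_buf_readahead(target, blkno + numblks, numblks, XBF_DONT_BLOCK);
 *	bp = xfs_buf_read_flags(target, blkno, numblks, XBF_LOCK | XBF_MAPPED);
 *	if (!bp)
 *		return ENOMEM;
 *	if (XFS_BUF_GETERROR(bp)) {
 *		error = XFS_BUF_GETERROR(bp);
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 */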
  601. xfs_buf_t *
  602. xfs_buf_get_empty(
  603. size_t len,
  604. xfs_buftarg_t *target)
  605. {
  606. xfs_buf_t *bp;
  607. bp = xfs_buf_allocate(0);
  608. if (bp)
  609. _xfs_buf_initialize(bp, target, 0, len, 0);
  610. return bp;
  611. }
  612. static inline struct page *
  613. mem_to_page(
  614. void *addr)
  615. {
  616. if (((unsigned long)addr < VMALLOC_START) ||
  617. ((unsigned long)addr >= VMALLOC_END)) {
  618. return virt_to_page(addr);
  619. } else {
  620. return vmalloc_to_page(addr);
  621. }
  622. }
  623. int
  624. xfs_buf_associate_memory(
  625. xfs_buf_t *bp,
  626. void *mem,
  627. size_t len)
  628. {
  629. int rval;
  630. int i = 0;
  631. unsigned long pageaddr;
  632. unsigned long offset;
  633. size_t buflen;
  634. int page_count;
  635. pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
  636. offset = (unsigned long)mem - pageaddr;
  637. buflen = PAGE_CACHE_ALIGN(len + offset);
  638. page_count = buflen >> PAGE_CACHE_SHIFT;
  639. /* Free any previous set of page pointers */
  640. if (bp->b_pages)
  641. _xfs_buf_free_pages(bp);
  642. bp->b_pages = NULL;
  643. bp->b_addr = mem;
  644. rval = _xfs_buf_get_pages(bp, page_count, 0);
  645. if (rval)
  646. return rval;
  647. bp->b_offset = offset;
  648. for (i = 0; i < bp->b_page_count; i++) {
  649. bp->b_pages[i] = mem_to_page((void *)pageaddr);
  650. pageaddr += PAGE_CACHE_SIZE;
  651. }
  652. bp->b_count_desired = len;
  653. bp->b_buffer_length = buflen;
  654. bp->b_flags |= XBF_MAPPED;
  655. return 0;
  656. }
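/*
 * Editorial sketch (not part of the original source): wrapping
 * caller-supplied memory in a buffer. "kmem" and "len" are placeholders.
 *
 *	bp = xfs_buf_get_empty(len, target);
 *	if (!bp)
 *		return ENOMEM;
 *	error = xfs_buf_associate_memory(bp, kmem, len);
 *	if (error) {
 *		xfs_buf_free(bp);
 *		return error;
 *	}
 */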
  657. xfs_buf_t *
  658. xfs_buf_get_noaddr(
  659. size_t len,
  660. xfs_buftarg_t *target)
  661. {
  662. unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
  663. int error, i;
  664. xfs_buf_t *bp;
  665. bp = xfs_buf_allocate(0);
  666. if (unlikely(bp == NULL))
  667. goto fail;
  668. _xfs_buf_initialize(bp, target, 0, len, 0);
  669. error = _xfs_buf_get_pages(bp, page_count, 0);
  670. if (error)
  671. goto fail_free_buf;
  672. for (i = 0; i < page_count; i++) {
  673. bp->b_pages[i] = alloc_page(GFP_KERNEL);
  674. if (!bp->b_pages[i])
  675. goto fail_free_mem;
  676. }
  677. bp->b_flags |= _XBF_PAGES;
  678. error = _xfs_buf_map_pages(bp, XBF_MAPPED);
  679. if (unlikely(error)) {
  680. printk(KERN_WARNING "%s: failed to map pages\n",
  681. __FUNCTION__);
  682. goto fail_free_mem;
  683. }
  684. xfs_buf_unlock(bp);
  685. XB_TRACE(bp, "no_daddr", len);
  686. return bp;
  687. fail_free_mem:
  688. while (--i >= 0)
  689. __free_page(bp->b_pages[i]);
  690. _xfs_buf_free_pages(bp);
  691. fail_free_buf:
  692. xfs_buf_deallocate(bp);
  693. fail:
  694. return NULL;
  695. }
  696. /*
  697. * Increment reference count on buffer, to hold the buffer concurrently
  698. * with another thread which may release (free) the buffer asynchronously.
  699. * Must hold the buffer already to call this function.
  700. */
  701. void
  702. xfs_buf_hold(
  703. xfs_buf_t *bp)
  704. {
  705. atomic_inc(&bp->b_hold);
  706. XB_TRACE(bp, "hold", 0);
  707. }
  708. /*
  709. * Releases a hold on the specified buffer. If the
  710. * hold count is 1, calls xfs_buf_free.
  711. */
  712. void
  713. xfs_buf_rele(
  714. xfs_buf_t *bp)
  715. {
  716. xfs_bufhash_t *hash = bp->b_hash;
  717. XB_TRACE(bp, "rele", bp->b_relse);
  718. if (unlikely(!hash)) {
  719. ASSERT(!bp->b_relse);
  720. if (atomic_dec_and_test(&bp->b_hold))
  721. xfs_buf_free(bp);
  722. return;
  723. }
  724. if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
  725. if (bp->b_relse) {
  726. atomic_inc(&bp->b_hold);
  727. spin_unlock(&hash->bh_lock);
  728. (*(bp->b_relse)) (bp);
  729. } else if (bp->b_flags & XBF_FS_MANAGED) {
  730. spin_unlock(&hash->bh_lock);
  731. } else {
  732. ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
  733. list_del_init(&bp->b_hash_list);
  734. spin_unlock(&hash->bh_lock);
  735. xfs_buf_free(bp);
  736. }
  737. } else {
  738. /*
  739. * Catch reference count leaks
  740. */
  741. ASSERT(atomic_read(&bp->b_hold) >= 0);
  742. }
  743. }
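/*
 * Editorial note (illustrative only, not part of the original source):
 * a thread handing a buffer to another context takes an extra reference
 * first, and each side drops its own reference when done; the final
 * xfs_buf_rele() on an unhashed buffer frees it via xfs_buf_free().
 *
 *	xfs_buf_hold(bp);
 *	... pass bp to an I/O completion or worker context ...
 *	xfs_buf_rele(bp);
 */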
  744. /*
  745. * Mutual exclusion on buffers. Locking model:
  746. *
  747. * Buffers associated with inodes for which buffer locking
  748. * is not enabled are not protected by semaphores, and are
  749. * assumed to be exclusively owned by the caller. There is a
  750. * spinlock in the buffer, used by the caller when concurrent
  751. * access is possible.
  752. */
  753. /*
  754. * Locks a buffer object, if it is not already locked.
  755. * Note that this in no way locks the underlying pages, so it is only
  756. * useful for synchronizing concurrent use of buffer objects, not for
  757. * synchronizing independent access to the underlying pages.
  758. */
  759. int
  760. xfs_buf_cond_lock(
  761. xfs_buf_t *bp)
  762. {
  763. int locked;
  764. locked = down_trylock(&bp->b_sema) == 0;
  765. if (locked) {
  766. XB_SET_OWNER(bp);
  767. }
  768. XB_TRACE(bp, "cond_lock", (long)locked);
  769. return locked ? 0 : -EBUSY;
  770. }
  771. #if defined(DEBUG) || defined(XFS_BLI_TRACE)
  772. int
  773. xfs_buf_lock_value(
  774. xfs_buf_t *bp)
  775. {
  776. return atomic_read(&bp->b_sema.count);
  777. }
  778. #endif
  779. /*
  780. * Locks a buffer object.
  781. * Note that this in no way locks the underlying pages, so it is only
  782. * useful for synchronizing concurrent use of buffer objects, not for
  783. * synchronizing independent access to the underlying pages.
  784. */
  785. void
  786. xfs_buf_lock(
  787. xfs_buf_t *bp)
  788. {
  789. XB_TRACE(bp, "lock", 0);
  790. if (atomic_read(&bp->b_io_remaining))
  791. blk_run_address_space(bp->b_target->bt_mapping);
  792. down(&bp->b_sema);
  793. XB_SET_OWNER(bp);
  794. XB_TRACE(bp, "locked", 0);
  795. }
  796. /*
  797. * Releases the lock on the buffer object.
  798. * If the buffer is marked delwri but is not queued, do so before we
  799. * unlock the buffer as we need to set flags correctly. We also need to
  800. * take a reference for the delwri queue because the unlocker is going to
  801. * drop their's and they don't know we just queued it.
  802. */
  803. void
  804. xfs_buf_unlock(
  805. xfs_buf_t *bp)
  806. {
  807. if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
  808. atomic_inc(&bp->b_hold);
  809. bp->b_flags |= XBF_ASYNC;
  810. xfs_buf_delwri_queue(bp, 0);
  811. }
  812. XB_CLEAR_OWNER(bp);
  813. up(&bp->b_sema);
  814. XB_TRACE(bp, "unlock", 0);
  815. }
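/*
 * Editorial sketch (not part of the original source): typical use of the
 * buffer lock. xfs_buf_cond_lock() returns 0 on success and -EBUSY if the
 * buffer is already locked; xfs_buf_lock() sleeps until it is available.
 *
 *	if (xfs_buf_cond_lock(bp) == 0) {
 *		... modify the buffer ...
 *		xfs_buf_unlock(bp);
 *	} else {
 *		... back off, or call xfs_buf_lock(bp) to wait ...
 *	}
 */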
  816. /*
  817. * Pinning Buffer Storage in Memory
  818. * Ensure that no attempt to force a buffer to disk will succeed.
  819. */
  820. void
  821. xfs_buf_pin(
  822. xfs_buf_t *bp)
  823. {
  824. atomic_inc(&bp->b_pin_count);
  825. XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
  826. }
  827. void
  828. xfs_buf_unpin(
  829. xfs_buf_t *bp)
  830. {
  831. if (atomic_dec_and_test(&bp->b_pin_count))
  832. wake_up_all(&bp->b_waiters);
  833. XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
  834. }
  835. int
  836. xfs_buf_ispin(
  837. xfs_buf_t *bp)
  838. {
  839. return atomic_read(&bp->b_pin_count);
  840. }
  841. STATIC void
  842. xfs_buf_wait_unpin(
  843. xfs_buf_t *bp)
  844. {
  845. DECLARE_WAITQUEUE (wait, current);
  846. if (atomic_read(&bp->b_pin_count) == 0)
  847. return;
  848. add_wait_queue(&bp->b_waiters, &wait);
  849. for (;;) {
  850. set_current_state(TASK_UNINTERRUPTIBLE);
  851. if (atomic_read(&bp->b_pin_count) == 0)
  852. break;
  853. if (atomic_read(&bp->b_io_remaining))
  854. blk_run_address_space(bp->b_target->bt_mapping);
  855. schedule();
  856. }
  857. remove_wait_queue(&bp->b_waiters, &wait);
  858. set_current_state(TASK_RUNNING);
  859. }
  860. /*
  861. * Buffer Utility Routines
  862. */
  863. STATIC void
  864. xfs_buf_iodone_work(
  865. struct work_struct *work)
  866. {
  867. xfs_buf_t *bp =
  868. container_of(work, xfs_buf_t, b_iodone_work);
  869. /*
  870. * We can get an EOPNOTSUPP error for ordered writes. Here we clear the
  871. * ordered flag and reissue them. Because we can't tell the higher
  872. * layers directly that they should not issue ordered I/O anymore, they
  873. * need to check if the ordered flag was cleared during I/O completion.
  874. */
  875. if ((bp->b_error == EOPNOTSUPP) &&
  876. (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
  877. XB_TRACE(bp, "ordered_retry", bp->b_iodone);
  878. bp->b_flags &= ~XBF_ORDERED;
  879. xfs_buf_iorequest(bp);
  880. } else if (bp->b_iodone)
  881. (*(bp->b_iodone))(bp);
  882. else if (bp->b_flags & XBF_ASYNC)
  883. xfs_buf_relse(bp);
  884. }
  885. void
  886. xfs_buf_ioend(
  887. xfs_buf_t *bp,
  888. int schedule)
  889. {
  890. bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
  891. if (bp->b_error == 0)
  892. bp->b_flags |= XBF_DONE;
  893. XB_TRACE(bp, "iodone", bp->b_iodone);
  894. if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
  895. if (schedule) {
  896. INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
  897. queue_work(xfslogd_workqueue, &bp->b_iodone_work);
  898. } else {
  899. xfs_buf_iodone_work(&bp->b_iodone_work);
  900. }
  901. } else {
  902. up(&bp->b_iodonesema);
  903. }
  904. }
  905. void
  906. xfs_buf_ioerror(
  907. xfs_buf_t *bp,
  908. int error)
  909. {
  910. ASSERT(error >= 0 && error <= 0xffff);
  911. bp->b_error = (unsigned short)error;
  912. XB_TRACE(bp, "ioerror", (unsigned long)error);
  913. }
  914. /*
  915. * Initiate I/O on a buffer, based on the flags supplied.
  916. * The b_iodone routine in the buffer supplied will only be called
  917. * when all of the subsidiary I/O requests, if any, have been completed.
  918. */
  919. int
  920. xfs_buf_iostart(
  921. xfs_buf_t *bp,
  922. xfs_buf_flags_t flags)
  923. {
  924. int status = 0;
  925. XB_TRACE(bp, "iostart", (unsigned long)flags);
  926. if (flags & XBF_DELWRI) {
  927. bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
  928. bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
  929. xfs_buf_delwri_queue(bp, 1);
  930. return status;
  931. }
  932. bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
  933. XBF_READ_AHEAD | _XBF_RUN_QUEUES);
  934. bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
  935. XBF_READ_AHEAD | _XBF_RUN_QUEUES);
  936. BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
  937. /* For writes allow an alternate strategy routine to precede
  938. * the actual I/O request (which may not be issued at all in
  939. * a shutdown situation, for example).
  940. */
  941. status = (flags & XBF_WRITE) ?
  942. xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
  943. /* Wait for I/O if we are not an async request.
  944. * Note: async I/O request completion will release the buffer,
  945. * and that can already be done by this point. So using the
  946. * buffer pointer from here on, after async I/O, is invalid.
  947. */
  948. if (!status && !(flags & XBF_ASYNC))
  949. status = xfs_buf_iowait(bp);
  950. return status;
  951. }
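/*
 * Editorial sketch (not part of the original source): xfs_buf_iostart()
 * usage. A plain XBF_READ is synchronous and waits via xfs_buf_iowait();
 * XBF_WRITE | XBF_ASYNC returns immediately and the buffer may already be
 * released on return; XBF_DELWRI | XBF_ASYNC only queues the buffer for
 * xfsbufd without issuing any I/O.
 *
 *	error = xfs_buf_iostart(bp, XBF_READ);
 *	error = xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC);
 *	error = xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
 */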
  952. STATIC_INLINE void
  953. _xfs_buf_ioend(
  954. xfs_buf_t *bp,
  955. int schedule)
  956. {
  957. if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
  958. xfs_buf_ioend(bp, schedule);
  959. }
  960. STATIC void
  961. xfs_buf_bio_end_io(
  962. struct bio *bio,
  963. int error)
  964. {
  965. xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
  966. unsigned int blocksize = bp->b_target->bt_bsize;
  967. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  968. if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
  969. bp->b_error = EIO;
  970. do {
  971. struct page *page = bvec->bv_page;
  972. ASSERT(!PagePrivate(page));
  973. if (unlikely(bp->b_error)) {
  974. if (bp->b_flags & XBF_READ)
  975. ClearPageUptodate(page);
  976. } else if (blocksize >= PAGE_CACHE_SIZE) {
  977. SetPageUptodate(page);
  978. } else if (!PagePrivate(page) &&
  979. (bp->b_flags & _XBF_PAGE_CACHE)) {
  980. set_page_region(page, bvec->bv_offset, bvec->bv_len);
  981. }
  982. if (--bvec >= bio->bi_io_vec)
  983. prefetchw(&bvec->bv_page->flags);
  984. } while (bvec >= bio->bi_io_vec);
  985. _xfs_buf_ioend(bp, 1);
  986. bio_put(bio);
  987. }
  988. STATIC void
  989. _xfs_buf_ioapply(
  990. xfs_buf_t *bp)
  991. {
  992. int rw, map_i, total_nr_pages, nr_pages;
  993. struct bio *bio;
  994. int offset = bp->b_offset;
  995. int size = bp->b_count_desired;
  996. sector_t sector = bp->b_bn;
  997. unsigned int blocksize = bp->b_target->bt_bsize;
  998. total_nr_pages = bp->b_page_count;
  999. map_i = 0;
  1000. if (bp->b_flags & XBF_ORDERED) {
  1001. ASSERT(!(bp->b_flags & XBF_READ));
  1002. rw = WRITE_BARRIER;
  1003. } else if (bp->b_flags & _XBF_RUN_QUEUES) {
  1004. ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
  1005. bp->b_flags &= ~_XBF_RUN_QUEUES;
  1006. rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
  1007. } else {
  1008. rw = (bp->b_flags & XBF_WRITE) ? WRITE :
  1009. (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
  1010. }
  1011. /* Special code path for reading a sub page size buffer in --
  1012. * we populate up the whole page, and hence the other metadata
  1013. * in the same page. This optimization is only valid when the
  1014. * filesystem block size is not smaller than the page size.
  1015. */
  1016. if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
  1017. (bp->b_flags & XBF_READ) &&
  1018. (blocksize >= PAGE_CACHE_SIZE)) {
  1019. bio = bio_alloc(GFP_NOIO, 1);
  1020. bio->bi_bdev = bp->b_target->bt_bdev;
  1021. bio->bi_sector = sector - (offset >> BBSHIFT);
  1022. bio->bi_end_io = xfs_buf_bio_end_io;
  1023. bio->bi_private = bp;
  1024. bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
  1025. size = 0;
  1026. atomic_inc(&bp->b_io_remaining);
  1027. goto submit_io;
  1028. }
  1029. next_chunk:
  1030. atomic_inc(&bp->b_io_remaining);
  1031. nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
  1032. if (nr_pages > total_nr_pages)
  1033. nr_pages = total_nr_pages;
  1034. bio = bio_alloc(GFP_NOIO, nr_pages);
  1035. bio->bi_bdev = bp->b_target->bt_bdev;
  1036. bio->bi_sector = sector;
  1037. bio->bi_end_io = xfs_buf_bio_end_io;
  1038. bio->bi_private = bp;
  1039. for (; size && nr_pages; nr_pages--, map_i++) {
  1040. int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
  1041. if (nbytes > size)
  1042. nbytes = size;
  1043. rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
  1044. if (rbytes < nbytes)
  1045. break;
  1046. offset = 0;
  1047. sector += nbytes >> BBSHIFT;
  1048. size -= nbytes;
  1049. total_nr_pages--;
  1050. }
  1051. submit_io:
  1052. if (likely(bio->bi_size)) {
  1053. submit_bio(rw, bio);
  1054. if (size)
  1055. goto next_chunk;
  1056. } else {
  1057. bio_put(bio);
  1058. xfs_buf_ioerror(bp, EIO);
  1059. }
  1060. }
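/*
 * Editorial note (illustrative only, not part of the original source):
 * with 4096-byte pages and 512-byte basic blocks (BBSHIFT == 9), each bio
 * built above maps at most BIO_MAX_SECTORS >> 3 pages; assuming the
 * contemporary BIO_MAX_PAGES of 256 that is 2048 >> 3 = 256 pages, i.e.
 * 1 MB per bio, and larger buffers simply loop back to next_chunk for
 * another bio.
 */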
  1061. int
  1062. xfs_buf_iorequest(
  1063. xfs_buf_t *bp)
  1064. {
  1065. XB_TRACE(bp, "iorequest", 0);
  1066. if (bp->b_flags & XBF_DELWRI) {
  1067. xfs_buf_delwri_queue(bp, 1);
  1068. return 0;
  1069. }
  1070. if (bp->b_flags & XBF_WRITE) {
  1071. xfs_buf_wait_unpin(bp);
  1072. }
  1073. xfs_buf_hold(bp);
  1074. /* Set the count to 1 initially, this will stop an I/O
  1075. * completion callout which happens before we have started
  1076. * all the I/O from calling xfs_buf_ioend too early.
  1077. */
  1078. atomic_set(&bp->b_io_remaining, 1);
  1079. _xfs_buf_ioapply(bp);
  1080. _xfs_buf_ioend(bp, 0);
  1081. xfs_buf_rele(bp);
  1082. return 0;
  1083. }
  1084. /*
  1085. * Waits for I/O to complete on the buffer supplied.
  1086. * It returns immediately if no I/O is pending.
  1087. * It returns the I/O error code, if any, or 0 if there was no error.
  1088. */
  1089. int
  1090. xfs_buf_iowait(
  1091. xfs_buf_t *bp)
  1092. {
  1093. XB_TRACE(bp, "iowait", 0);
  1094. if (atomic_read(&bp->b_io_remaining))
  1095. blk_run_address_space(bp->b_target->bt_mapping);
  1096. down(&bp->b_iodonesema);
  1097. XB_TRACE(bp, "iowaited", (long)bp->b_error);
  1098. return bp->b_error;
  1099. }
  1100. xfs_caddr_t
  1101. xfs_buf_offset(
  1102. xfs_buf_t *bp,
  1103. size_t offset)
  1104. {
  1105. struct page *page;
  1106. if (bp->b_flags & XBF_MAPPED)
  1107. return XFS_BUF_PTR(bp) + offset;
  1108. offset += bp->b_offset;
  1109. page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
  1110. return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
  1111. }
  1112. /*
  1113. * Move data into or out of a buffer.
  1114. */
  1115. void
  1116. xfs_buf_iomove(
  1117. xfs_buf_t *bp, /* buffer to process */
  1118. size_t boff, /* starting buffer offset */
  1119. size_t bsize, /* length to copy */
  1120. caddr_t data, /* data address */
  1121. xfs_buf_rw_t mode) /* read/write/zero flag */
  1122. {
  1123. size_t bend, cpoff, csize;
  1124. struct page *page;
  1125. bend = boff + bsize;
  1126. while (boff < bend) {
  1127. page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
  1128. cpoff = xfs_buf_poff(boff + bp->b_offset);
  1129. csize = min_t(size_t,
  1130. PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
  1131. ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
  1132. switch (mode) {
  1133. case XBRW_ZERO:
  1134. memset(page_address(page) + cpoff, 0, csize);
  1135. break;
  1136. case XBRW_READ:
  1137. memcpy(data, page_address(page) + cpoff, csize);
  1138. break;
  1139. case XBRW_WRITE:
  1140. memcpy(page_address(page) + cpoff, data, csize);
  1141. }
  1142. boff += csize;
  1143. data += csize;
  1144. }
  1145. }
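/*
 * Editorial sketch (not part of the original source): xfs_buf_iomove()
 * works page by page, so it is safe on unmapped buffers as well. "hdr" is
 * a placeholder structure supplied by the caller.
 *
 *	xfs_buf_iomove(bp, 0, sizeof(hdr), (caddr_t)&hdr, XBRW_WRITE);
 *	xfs_buf_iomove(bp, 512, 512, NULL, XBRW_ZERO);
 *
 * xfs_buf_offset() similarly returns an address for a single offset that,
 * for an unmapped buffer, is valid only within the containing page.
 */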
  1146. /*
  1147. * Handling of buffer targets (buftargs).
  1148. */
  1149. /*
  1150. * Wait for any bufs with callbacks that have been submitted but
  1151. * have not yet returned... walk the hash list for the target.
  1152. */
  1153. void
  1154. xfs_wait_buftarg(
  1155. xfs_buftarg_t *btp)
  1156. {
  1157. xfs_buf_t *bp, *n;
  1158. xfs_bufhash_t *hash;
  1159. uint i;
  1160. for (i = 0; i < (1 << btp->bt_hashshift); i++) {
  1161. hash = &btp->bt_hash[i];
  1162. again:
  1163. spin_lock(&hash->bh_lock);
  1164. list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
  1165. ASSERT(btp == bp->b_target);
  1166. if (!(bp->b_flags & XBF_FS_MANAGED)) {
  1167. spin_unlock(&hash->bh_lock);
  1168. /*
  1169. * Catch superblock reference count leaks
  1170. * immediately
  1171. */
  1172. BUG_ON(bp->b_bn == 0);
  1173. delay(100);
  1174. goto again;
  1175. }
  1176. }
  1177. spin_unlock(&hash->bh_lock);
  1178. }
  1179. }
  1180. /*
  1181. * Allocate buffer hash table for a given target.
  1182. * For devices containing metadata (i.e. not the log/realtime devices)
  1183. * we need to allocate a much larger hash table.
  1184. */
  1185. STATIC void
  1186. xfs_alloc_bufhash(
  1187. xfs_buftarg_t *btp,
  1188. int external)
  1189. {
  1190. unsigned int i;
  1191. btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
  1192. btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
  1193. btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
  1194. sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
  1195. for (i = 0; i < (1 << btp->bt_hashshift); i++) {
  1196. spin_lock_init(&btp->bt_hash[i].bh_lock);
  1197. INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
  1198. }
  1199. }
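/*
 * Editorial note (illustrative only, not part of the original source):
 * for the data device (external == 0) this sizes the table at
 * 1 << 8 = 256 xfs_bufhash_t buckets, and for log/realtime devices at
 * 1 << 3 = 8 buckets; _xfs_buf_find() then picks a bucket with
 * hash_long((unsigned long)ioff, btp->bt_hashshift).
 */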
  1200. STATIC void
  1201. xfs_free_bufhash(
  1202. xfs_buftarg_t *btp)
  1203. {
  1204. kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
  1205. btp->bt_hash = NULL;
  1206. }
  1207. /*
  1208. * buftarg list for delwrite queue processing
  1209. */
  1210. static LIST_HEAD(xfs_buftarg_list);
  1211. static DEFINE_SPINLOCK(xfs_buftarg_lock);
  1212. STATIC void
  1213. xfs_register_buftarg(
  1214. xfs_buftarg_t *btp)
  1215. {
  1216. spin_lock(&xfs_buftarg_lock);
  1217. list_add(&btp->bt_list, &xfs_buftarg_list);
  1218. spin_unlock(&xfs_buftarg_lock);
  1219. }
  1220. STATIC void
  1221. xfs_unregister_buftarg(
  1222. xfs_buftarg_t *btp)
  1223. {
  1224. spin_lock(&xfs_buftarg_lock);
  1225. list_del(&btp->bt_list);
  1226. spin_unlock(&xfs_buftarg_lock);
  1227. }
  1228. void
  1229. xfs_free_buftarg(
  1230. xfs_buftarg_t *btp,
  1231. int external)
  1232. {
  1233. xfs_flush_buftarg(btp, 1);
  1234. xfs_blkdev_issue_flush(btp);
  1235. if (external)
  1236. xfs_blkdev_put(btp->bt_bdev);
  1237. xfs_free_bufhash(btp);
  1238. iput(btp->bt_mapping->host);
  1239. /* Unregister the buftarg first so that we don't get a
  1240. * wakeup finding a non-existent task
  1241. */
  1242. xfs_unregister_buftarg(btp);
  1243. kthread_stop(btp->bt_task);
  1244. kmem_free(btp, sizeof(*btp));
  1245. }
  1246. STATIC int
  1247. xfs_setsize_buftarg_flags(
  1248. xfs_buftarg_t *btp,
  1249. unsigned int blocksize,
  1250. unsigned int sectorsize,
  1251. int verbose)
  1252. {
  1253. btp->bt_bsize = blocksize;
  1254. btp->bt_sshift = ffs(sectorsize) - 1;
  1255. btp->bt_smask = sectorsize - 1;
  1256. if (set_blocksize(btp->bt_bdev, sectorsize)) {
  1257. printk(KERN_WARNING
  1258. "XFS: Cannot set_blocksize to %u on device %s\n",
  1259. sectorsize, XFS_BUFTARG_NAME(btp));
  1260. return EINVAL;
  1261. }
  1262. if (verbose &&
  1263. (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
  1264. printk(KERN_WARNING
  1265. "XFS: %u byte sectors in use on device %s. "
  1266. "This is suboptimal; %u or greater is ideal.\n",
  1267. sectorsize, XFS_BUFTARG_NAME(btp),
  1268. (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
  1269. }
  1270. return 0;
  1271. }
  1272. /*
  1273. * When allocating the initial buffer target we have not yet
  1274. * read in the superblock, so don't know what sized sectors
  1275. * are being used at this early stage. Play safe.
  1276. */
  1277. STATIC int
  1278. xfs_setsize_buftarg_early(
  1279. xfs_buftarg_t *btp,
  1280. struct block_device *bdev)
  1281. {
  1282. return xfs_setsize_buftarg_flags(btp,
  1283. PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
  1284. }
  1285. int
  1286. xfs_setsize_buftarg(
  1287. xfs_buftarg_t *btp,
  1288. unsigned int blocksize,
  1289. unsigned int sectorsize)
  1290. {
  1291. return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
  1292. }
  1293. STATIC int
  1294. xfs_mapping_buftarg(
  1295. xfs_buftarg_t *btp,
  1296. struct block_device *bdev)
  1297. {
  1298. struct backing_dev_info *bdi;
  1299. struct inode *inode;
  1300. struct address_space *mapping;
  1301. static const struct address_space_operations mapping_aops = {
  1302. .sync_page = block_sync_page,
  1303. .migratepage = fail_migrate_page,
  1304. };
  1305. inode = new_inode(bdev->bd_inode->i_sb);
  1306. if (!inode) {
  1307. printk(KERN_WARNING
  1308. "XFS: Cannot allocate mapping inode for device %s\n",
  1309. XFS_BUFTARG_NAME(btp));
  1310. return ENOMEM;
  1311. }
  1312. inode->i_mode = S_IFBLK;
  1313. inode->i_bdev = bdev;
  1314. inode->i_rdev = bdev->bd_dev;
  1315. bdi = blk_get_backing_dev_info(bdev);
  1316. if (!bdi)
  1317. bdi = &default_backing_dev_info;
  1318. mapping = &inode->i_data;
  1319. mapping->a_ops = &mapping_aops;
  1320. mapping->backing_dev_info = bdi;
  1321. mapping_set_gfp_mask(mapping, GFP_NOFS);
  1322. btp->bt_mapping = mapping;
  1323. return 0;
  1324. }
  1325. STATIC int
  1326. xfs_alloc_delwrite_queue(
  1327. xfs_buftarg_t *btp)
  1328. {
  1329. int error = 0;
  1330. INIT_LIST_HEAD(&btp->bt_list);
  1331. INIT_LIST_HEAD(&btp->bt_delwrite_queue);
  1332. spin_lock_init(&btp->bt_delwrite_lock);
  1333. btp->bt_flags = 0;
  1334. btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
  1335. if (IS_ERR(btp->bt_task)) {
  1336. error = PTR_ERR(btp->bt_task);
  1337. goto out_error;
  1338. }
  1339. xfs_register_buftarg(btp);
  1340. out_error:
  1341. return error;
  1342. }
  1343. xfs_buftarg_t *
  1344. xfs_alloc_buftarg(
  1345. struct block_device *bdev,
  1346. int external)
  1347. {
  1348. xfs_buftarg_t *btp;
  1349. btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
  1350. btp->bt_dev = bdev->bd_dev;
  1351. btp->bt_bdev = bdev;
  1352. if (xfs_setsize_buftarg_early(btp, bdev))
  1353. goto error;
  1354. if (xfs_mapping_buftarg(btp, bdev))
  1355. goto error;
  1356. if (xfs_alloc_delwrite_queue(btp))
  1357. goto error;
  1358. xfs_alloc_bufhash(btp, external);
  1359. return btp;
  1360. error:
  1361. kmem_free(btp, sizeof(*btp));
  1362. return NULL;
  1363. }
  1364. /*
  1365. * Delayed write buffer handling
  1366. */
  1367. STATIC void
  1368. xfs_buf_delwri_queue(
  1369. xfs_buf_t *bp,
  1370. int unlock)
  1371. {
  1372. struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
  1373. spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
  1374. XB_TRACE(bp, "delwri_q", (long)unlock);
  1375. ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
  1376. spin_lock(dwlk);
  1377. /* If already in the queue, dequeue and place at tail */
  1378. if (!list_empty(&bp->b_list)) {
  1379. ASSERT(bp->b_flags & _XBF_DELWRI_Q);
  1380. if (unlock)
  1381. atomic_dec(&bp->b_hold);
  1382. list_del(&bp->b_list);
  1383. }
  1384. bp->b_flags |= _XBF_DELWRI_Q;
  1385. list_add_tail(&bp->b_list, dwq);
  1386. bp->b_queuetime = jiffies;
  1387. spin_unlock(dwlk);
  1388. if (unlock)
  1389. xfs_buf_unlock(bp);
  1390. }
  1391. void
  1392. xfs_buf_delwri_dequeue(
  1393. xfs_buf_t *bp)
  1394. {
  1395. spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
  1396. int dequeued = 0;
  1397. spin_lock(dwlk);
  1398. if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
  1399. ASSERT(bp->b_flags & _XBF_DELWRI_Q);
  1400. list_del_init(&bp->b_list);
  1401. dequeued = 1;
  1402. }
  1403. bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
  1404. spin_unlock(dwlk);
  1405. if (dequeued)
  1406. xfs_buf_rele(bp);
  1407. XB_TRACE(bp, "delwri_dq", (long)dequeued);
  1408. }
  1409. STATIC void
  1410. xfs_buf_runall_queues(
  1411. struct workqueue_struct *queue)
  1412. {
  1413. flush_workqueue(queue);
  1414. }
  1415. STATIC int
  1416. xfsbufd_wakeup(
  1417. int priority,
  1418. gfp_t mask)
  1419. {
  1420. xfs_buftarg_t *btp;
  1421. spin_lock(&xfs_buftarg_lock);
  1422. list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
  1423. if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
  1424. continue;
  1425. set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
  1426. wake_up_process(btp->bt_task);
  1427. }
  1428. spin_unlock(&xfs_buftarg_lock);
  1429. return 0;
  1430. }
  1431. /*
  1432. * Move as many buffers as specified to the supplied list
  1433. * indicating if we skipped any buffers to prevent deadlocks.
  1434. */
  1435. STATIC int
  1436. xfs_buf_delwri_split(
  1437. xfs_buftarg_t *target,
  1438. struct list_head *list,
  1439. unsigned long age)
  1440. {
  1441. xfs_buf_t *bp, *n;
  1442. struct list_head *dwq = &target->bt_delwrite_queue;
  1443. spinlock_t *dwlk = &target->bt_delwrite_lock;
  1444. int skipped = 0;
  1445. int force;
  1446. force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
  1447. INIT_LIST_HEAD(list);
  1448. spin_lock(dwlk);
  1449. list_for_each_entry_safe(bp, n, dwq, b_list) {
  1450. XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
  1451. ASSERT(bp->b_flags & XBF_DELWRI);
  1452. if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
  1453. if (!force &&
  1454. time_before(jiffies, bp->b_queuetime + age)) {
  1455. xfs_buf_unlock(bp);
  1456. break;
  1457. }
  1458. bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
  1459. _XBF_RUN_QUEUES);
  1460. bp->b_flags |= XBF_WRITE;
  1461. list_move_tail(&bp->b_list, list);
  1462. } else
  1463. skipped++;
  1464. }
  1465. spin_unlock(dwlk);
  1466. return skipped;
  1467. }
  1468. STATIC int
  1469. xfsbufd(
  1470. void *data)
  1471. {
  1472. struct list_head tmp;
  1473. xfs_buftarg_t *target = (xfs_buftarg_t *)data;
  1474. int count;
  1475. xfs_buf_t *bp;
  1476. current->flags |= PF_MEMALLOC;
  1477. set_freezable();
  1478. do {
  1479. if (unlikely(freezing(current))) {
  1480. set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
  1481. refrigerator();
  1482. } else {
  1483. clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
  1484. }
  1485. schedule_timeout_interruptible(
  1486. xfs_buf_timer_centisecs * msecs_to_jiffies(10));
  1487. xfs_buf_delwri_split(target, &tmp,
  1488. xfs_buf_age_centisecs * msecs_to_jiffies(10));
  1489. count = 0;
  1490. while (!list_empty(&tmp)) {
  1491. bp = list_entry(tmp.next, xfs_buf_t, b_list);
  1492. ASSERT(target == bp->b_target);
  1493. list_del_init(&bp->b_list);
  1494. xfs_buf_iostrategy(bp);
  1495. count++;
  1496. }
  1497. if (as_list_len > 0)
  1498. purge_addresses();
  1499. if (count)
  1500. blk_run_address_space(target->bt_mapping);
  1501. } while (!kthread_should_stop());
  1502. return 0;
  1503. }
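/*
 * Editorial note (illustrative only, not part of the original source):
 * with the default sysctl values of the era (xfs_buf_timer_centisecs ==
 * 100, xfs_buf_age_centisecs == 1500) xfsbufd wakes roughly once a second
 * and pushes out delwri buffers that have been queued for about 15
 * seconds, unless XBT_FORCE_FLUSH makes xfs_buf_delwri_split() take
 * everything regardless of age.
 */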
  1504. /*
  1505. * Go through all incore buffers, and release buffers if they belong to
  1506. * the given device. This is used in filesystem error handling to
  1507. * preserve the consistency of its metadata.
  1508. */
  1509. int
  1510. xfs_flush_buftarg(
  1511. xfs_buftarg_t *target,
  1512. int wait)
  1513. {
  1514. struct list_head tmp;
  1515. xfs_buf_t *bp, *n;
  1516. int pincount = 0;
  1517. xfs_buf_runall_queues(xfsdatad_workqueue);
  1518. xfs_buf_runall_queues(xfslogd_workqueue);
  1519. set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
  1520. pincount = xfs_buf_delwri_split(target, &tmp, 0);
  1521. /*
  1522. * Dropped the delayed write list lock, now walk the temporary list
  1523. */
  1524. list_for_each_entry_safe(bp, n, &tmp, b_list) {
  1525. ASSERT(target == bp->b_target);
  1526. if (wait)
  1527. bp->b_flags &= ~XBF_ASYNC;
  1528. else
  1529. list_del_init(&bp->b_list);
  1530. xfs_buf_iostrategy(bp);
  1531. }
  1532. if (wait)
  1533. blk_run_address_space(target->bt_mapping);
  1534. /*
  1535. * Remaining list items must be flushed before returning
  1536. */
  1537. while (!list_empty(&tmp)) {
  1538. bp = list_entry(tmp.next, xfs_buf_t, b_list);
  1539. list_del_init(&bp->b_list);
  1540. xfs_iowait(bp);
  1541. xfs_buf_relse(bp);
  1542. }
  1543. return pincount;
  1544. }
  1545. int __init
  1546. xfs_buf_init(void)
  1547. {
  1548. #ifdef XFS_BUF_TRACE
  1549. xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
  1550. #endif
  1551. xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
  1552. KM_ZONE_HWALIGN, NULL);
  1553. if (!xfs_buf_zone)
  1554. goto out_free_trace_buf;
  1555. xfslogd_workqueue = create_workqueue("xfslogd");
  1556. if (!xfslogd_workqueue)
  1557. goto out_free_buf_zone;
  1558. xfsdatad_workqueue = create_workqueue("xfsdatad");
  1559. if (!xfsdatad_workqueue)
  1560. goto out_destroy_xfslogd_workqueue;
  1561. register_shrinker(&xfs_buf_shake);
  1562. return 0;
  1563. out_destroy_xfslogd_workqueue:
  1564. destroy_workqueue(xfslogd_workqueue);
  1565. out_free_buf_zone:
  1566. kmem_zone_destroy(xfs_buf_zone);
  1567. out_free_trace_buf:
  1568. #ifdef XFS_BUF_TRACE
  1569. ktrace_free(xfs_buf_trace_buf);
  1570. #endif
  1571. return -ENOMEM;
  1572. }
  1573. void
  1574. xfs_buf_terminate(void)
  1575. {
  1576. unregister_shrinker(&xfs_buf_shake);
  1577. destroy_workqueue(xfsdatad_workqueue);
  1578. destroy_workqueue(xfslogd_workqueue);
  1579. kmem_zone_destroy(xfs_buf_zone);
  1580. #ifdef XFS_BUF_TRACE
  1581. ktrace_free(xfs_buf_trace_buf);
  1582. #endif
  1583. }
  1584. #ifdef CONFIG_KDB_MODULES
  1585. struct list_head *
  1586. xfs_get_buftarg_list(void)
  1587. {
  1588. return &xfs_buftarg_list;
  1589. }
  1590. #endif