/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

static struct shrinker xfs_buf_shake = {
	.shrink = xfsbufd_wakeup,
	.seeks = DEFAULT_SEEKS,
};

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
#ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
	xfs_buf_t	*bp,
	char		*id,
	void		*data,
	void		*ra)
{
	ktrace_enter(xfs_buf_trace_buf,
		bp, id,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count,
		(void *)current,
		data, ra,
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}

ktrace_t *xfs_buf_trace_buf;
#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define XB_TRACE(bp, id, data)	do { } while (0)
#endif
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));
/*
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))
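
/*
 * Worked example (illustrative only, assuming 4096-byte pages and
 * 64-bit longs): PRSHIFT is 12 - 6 = 6, so each bit of page->private
 * covers NBPPR = 4096 / 64 = 64 bytes of the page.  For a 512-byte
 * region at offset 1024, page_region_mask() below computes
 * first = BTOPR(1024) = 16 and final = BTOPRT(1024 + 512 - 1) = 23,
 * i.e. the region spans eight 64-byte page regions.
 */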
STATIC unsigned long
page_region_mask(
	size_t		offset,
	size_t		length)
{
	unsigned long	mask;
	int		first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - (final);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	return mask;
}

STATIC_INLINE void
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}

STATIC_INLINE int
test_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
}
/*
 * Internal xfs_buf_t object manipulation
 */
STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);
}
/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
	}
}
/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
		uint		i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			if (bp->b_flags & _XBF_PAGE_CACHE)
				ASSERT(!PagePrivate(page));
			page_cache_release(page);
		}
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
}
/*
 * Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	xfs_off_t		end;
	int			error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			congestion_wait(WRITE, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		ASSERT(!PagePrivate(page));
		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_flags |= _XBF_PAGE_LOCKED;
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;
	}

	if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}
/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
					-1, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}
/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	xfs_bufhash_t		*hash;
	xfs_buf_t		*bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
	}
	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}
/*
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0, i;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);
	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
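
/*
 * Illustrative call pattern (a sketch only; "target", "blkno" and
 * "nbblks" are assumed to come from the caller): get a locked buffer,
 * use it, then unlock and drop the hold in one step.
 *
 *	xfs_buf_t	*bp;
 *
 *	bp = xfs_buf_get_flags(target, blkno, nbblks, XBF_LOCK);
 *	if (bp) {
 *		... read or modify the buffer contents ...
 *		xfs_buf_relse(bp);	// unlock + release reference
 *	}
 */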
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	status = xfs_buf_iorequest(bp);
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);
	return status;
}
xfs_buf_t *
xfs_buf_read_flags(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_flags(target, ioff, isize, flags);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp)) {
			XB_TRACE(bp, "read", (unsigned long)flags);
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			XB_TRACE(bp, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			XB_TRACE(bp, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
}
xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (!is_vmalloc_addr(addr))
		return virt_to_page(addr);
	else
		return vmalloc_to_page(addr);
}
int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_CACHE_ALIGN(len + offset);
	page_count = buflen >> PAGE_CACHE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_CACHE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;
	bp->b_flags &= ~_XBF_PAGE_LOCKED;

	return 0;
}
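
/*
 * Illustrative use (a sketch; "mem" and "len" are assumed to be a
 * caller-owned allocation): wrap existing memory in an uncached
 * buffer so it can be driven through the normal buffer I/O paths.
 *
 *	bp = xfs_buf_get_empty(len, target);
 *	if (bp && xfs_buf_associate_memory(bp, mem, len) == 0) {
 *		... issue I/O against bp ...
 *		xfs_buf_free(bp);
 *	}
 */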
xfs_buf_t *
xfs_buf_get_noaddr(
	size_t			len,
	xfs_buftarg_t		*target)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(GFP_KERNEL);
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		printk(KERN_WARNING "%s: failed to map pages\n",
				__func__);
		goto fail_free_mem;
	}

	xfs_buf_unlock(bp);

	XB_TRACE(bp, "no_daddr", len);
	return bp;

fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
fail_free_buf:
	xfs_buf_deallocate(bp);
fail:
	return NULL;
}
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	atomic_inc(&bp->b_hold);
	XB_TRACE(bp, "hold", 0);
}
/*
 * Releases a hold on the specified buffer.  If the hold count is 1,
 * calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	xfs_bufhash_t		*hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	}
}
/*
 * Mutual exclusion on buffers.  Locking model:
 *
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.
 */

/*
 * Locks a buffer object, if it is not already locked.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
	}
	XB_TRACE(bp, "cond_lock", (long)locked);
	return locked ? 0 : -EBUSY;
}

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
	xfs_buf_t		*bp)
{
	return bp->b_sema.count;
}
#endif

/*
 * Locks a buffer object.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "lock", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	XB_TRACE(bp, "locked", 0);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t		*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
	XB_TRACE(bp, "unlock", 0);
}
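
/*
 * Illustrative lock discipline (a sketch): pair xfs_buf_lock() with
 * xfs_buf_unlock(), or probe without blocking via the conditional
 * variant, which returns 0 on success:
 *
 *	if (xfs_buf_cond_lock(bp) == 0) {
 *		... buffer is exclusively owned here ...
 *		xfs_buf_unlock(bp);
 *	}
 */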
/*
 * Pinning Buffer Storage in Memory
 * Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
	xfs_buf_t		*bp)
{
	atomic_inc(&bp->b_pin_count);
	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}

void
xfs_buf_unpin(
	xfs_buf_t		*bp)
{
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}

int
xfs_buf_ispin(
	xfs_buf_t		*bp)
{
	return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}
/*
 * Buffer Utility Routines
 */
STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	/*
	 * We can get an EOPNOTSUPP in response to ordered writes.  Here we
	 * clear the ordered flag and reissue them.  Because we can't tell
	 * the higher layers directly that they should not issue ordered
	 * I/O anymore, they need to check if the _XFS_BARRIER_FAILED flag
	 * was set during I/O completion.
	 */
	if ((bp->b_error == EOPNOTSUPP) &&
	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
		XB_TRACE(bp, "ordered_retry", bp->b_iodone);
		bp->b_flags &= ~XBF_ORDERED;
		bp->b_flags |= _XFS_BARRIER_FAILED;
		xfs_buf_iorequest(bp);
	} else if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}
void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "iodone", bp->b_iodone);

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	XB_TRACE(bp, "ioerror", (unsigned long)error);
}
int
xfs_bawrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	XB_TRACE(bp, "bawrite", 0);

	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	xfs_buf_delwri_dequeue(bp);

	bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);

	bp->b_mount = mp;
	bp->b_strat = xfs_bdstrat_cb;
	return xfs_bdstrat_cb(bp);
}

void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	XB_TRACE(bp, "bdwrite", 0);

	bp->b_strat = xfs_bdstrat_cb;
	bp->b_mount = mp;

	bp->b_flags &= ~XBF_READ;
	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

	xfs_buf_delwri_queue(bp, 1);
}
STATIC_INLINE void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_flags &= ~_XBF_PAGE_LOCKED;
		xfs_buf_ioend(bp, schedule);
	}
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	xfs_buf_ioerror(bp, -error);

	do {
		struct page	*page = bvec->bv_page;

		ASSERT(!PagePrivate(page));
		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (bp->b_flags & _XBF_PAGE_LOCKED)
			unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}
STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_BARRIER;
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	}

	/* Special code path for reading a sub-page-size buffer: we
	 * populate the whole page, and hence the other metadata in the
	 * same page.  This optimization is only valid when the
	 * filesystem block size is not smaller than the page size.
	 */
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
	      (XBF_READ|_XBF_PAGE_LOCKED)) &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bp->b_target->bt_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;

		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
		size = 0;

		atomic_inc(&bp->b_io_remaining);

		goto submit_io;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

submit_io:
	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		bio_put(bio);
		xfs_buf_ioerror(bp, EIO);
	}
}
int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "iorequest", 0);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially; this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}
/*
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "iowait", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	wait_for_completion(&bp->b_iowait);
	XB_TRACE(bp, "iowaited", (long)bp->b_error);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}
/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	caddr_t			data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
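
/*
 * Illustrative use (a sketch): zero the first 512 bytes of a buffer
 * page by page, without requiring the buffer to be mapped.  The data
 * pointer is unused in XBRW_ZERO mode:
 *
 *	xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO);
 */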
/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
	xfs_buftarg_t	*btp)
{
	xfs_buf_t	*bp, *n;
	xfs_bufhash_t	*hash;
	uint		i;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];
again:
		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				/*
				 * Catch superblock reference count leaks
				 * immediately
				 */
				BUG_ON(bp->b_bn == 0);
				delay(100);
				goto again;
			}
		}
		spin_unlock(&hash->bh_lock);
	}
}
/*
 * Allocate buffer hash table for a given target.
 * For devices containing metadata (i.e. not the log/realtime devices)
 * we need to allocate a much larger hash table.
 */
STATIC void
xfs_alloc_bufhash(
	xfs_buftarg_t		*btp,
	int			external)
{
	unsigned int		i;

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
					sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
	}
}

STATIC void
xfs_free_bufhash(
	xfs_buftarg_t		*btp)
{
	kmem_free(btp->bt_hash);
	btp->bt_hash = NULL;
}
/*
 * buftarg list for delwrite queue processing
 */
static LIST_HEAD(xfs_buftarg_list);
static DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
	xfs_buftarg_t		*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
	xfs_buftarg_t		*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);
}

void
xfs_free_buftarg(
	xfs_buftarg_t		*btp)
{
	xfs_flush_buftarg(btp, 1);
	xfs_blkdev_issue_flush(btp);
	xfs_free_bufhash(btp);
	iput(btp->bt_mapping->host);

	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	 */
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

	kmem_free(btp);
}
STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		printk(KERN_WARNING
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	if (verbose &&
	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
		printk(KERN_WARNING
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
	}

	return 0;
}
/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so we don't know what size sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}
STATIC int
xfs_mapping_buftarg(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
	static const struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,
		.migratepage = fail_migrate_page,
	};

	inode = new_inode(bdev->bd_inode->i_sb);
	if (!inode) {
		printk(KERN_WARNING
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
		return ENOMEM;
	}
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
	if (!bdi)
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->bt_mapping = mapping;
	return 0;
}
STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp)
{
	int			error = 0;

	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spin_lock_init(&btp->bt_delwrite_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
		goto out_error;
	}
	xfs_register_buftarg(btp);
out_error:
	return error;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct block_device	*bdev,
	int			external)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_mapping_buftarg(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp))
		goto error;
	xfs_alloc_bufhash(btp, external);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}
/*
 * Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
	int			unlock)
{
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	XB_TRACE(bp, "delwri_q", (long)unlock);
	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}
void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
	int			dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	XB_TRACE(bp, "delwri_dq", (long)dequeued);
}
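
/*
 * Illustrative delayed-write lifecycle (a sketch of how the pieces in
 * this file fit together):
 *
 *	xfs_bdwrite(mp, bp);		// mark XBF_DELWRI|XBF_ASYNC,
 *					// queue, and unlock the buffer
 *	// ... later, xfsbufd() wakes, and ...
 *	xfs_buf_delwri_split(target, &tmp, age);  // claims aged buffers
 *	xfs_buf_iostrategy(bp);		// issues the actual write
 */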
STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}
STATIC int
xfsbufd_wakeup(
	int			priority,
	gfp_t			mask)
{
	xfs_buftarg_t		*btp;

	spin_lock(&xfs_buftarg_lock);
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
			continue;
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
		wake_up_process(btp->bt_task);
	}
	spin_unlock(&xfs_buftarg_lock);
	return 0;
}
/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;
}
STATIC int
xfsbufd(
	void		*data)
{
	struct list_head tmp;
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
	int		count;
	xfs_buf_t	*bp;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		xfs_buf_delwri_split(target, &tmp,
				xfs_buf_age_centisecs * msecs_to_jiffies(10));

		count = 0;
		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);
			count++;
		}

		if (count)
			blk_run_address_space(target->bt_mapping);

	} while (!kthread_should_stop());

	return 0;
}
/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device.  This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	struct list_head tmp;
	xfs_buf_t	*bp, *n;
	int		pincount = 0;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		ASSERT(target == bp->b_target);
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else
			list_del_init(&bp->b_list);

		xfs_buf_iostrategy(bp);
	}

	if (wait)
		blk_run_address_space(target->bt_mapping);

	/*
	 * Remaining list items must be flushed before returning
	 */
	while (!list_empty(&tmp)) {
		bp = list_entry(tmp.next, xfs_buf_t, b_list);

		list_del_init(&bp->b_list);
		xfs_iowait(bp);
		xfs_buf_relse(bp);
	}

	return pincount;
}
int __init
xfs_buf_init(void)
{
#ifdef XFS_BUF_TRACE
	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
#endif

	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out_free_trace_buf;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	register_shrinker(&xfs_buf_shake);
	return 0;

out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
out_free_trace_buf:
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
	return -ENOMEM;
}
void
xfs_buf_terminate(void)
{
	unregister_shrinker(&xfs_buf_shake);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
}

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif