xfs_buf.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;

static struct workqueue_struct *xfslogd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check
	 * has to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}
/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are no
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
 * there to optimise the shrinker removing the buffer from the LRU and
 * calling xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru) &&
		    !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}
static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}
/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}
/*
 * Allocates all the pages for the buffer in question and builds its page
 * list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}
/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}
/*
 * Finding and Reading Buffers
 */

/*
 * Look up, and create if absent, a lockable buffer for a given range of an
 * inode. The buffer is returned locked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	size_t			numbytes;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	xfs_daddr_t		eofs;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
	numbytes = BBTOB(numblks);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (blkno >= eofs) {
		/*
		 * XXX (dgc): we should really be returning EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
		WARN_ON(1);
		return NULL;
	}

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}
/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}
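/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller might obtain a locked single-extent buffer through the map
 * interface above. The function name and the daddr/numblks values are
 * hypothetical; error handling is minimal. Kept under #if 0 so it is never
 * compiled.
 */
#if 0	/* example only */
static void
example_get_buffer(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks)
{
	DEFINE_SINGLE_BUF_MAP(map, daddr, numblks);
	struct xfs_buf		*bp;

	bp = xfs_buf_get_map(target, &map, 1, 0);	/* blocks until locked */
	if (!bp)
		return;

	/*
	 * bp is returned locked with an elevated hold count; no read has
	 * been performed, so the contents are whatever was cached.
	 */
	memset(bp->b_addr, 0, BBTOB(bp->b_length));
	xfs_buf_relse(bp);	/* unlock and drop the hold */
}
#endif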
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	xfs_buf_iorequest(bp);
	if (flags & XBF_ASYNC)
		return 0;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 * If we are not low on memory then do the readahead in a
 * deadlock safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
			 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}
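/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical synchronous metadata read through the map interface, with a
 * verifier attached. The function name, block address and ops argument are
 * hypothetical; errors here follow this file's positive-errno convention.
 */
#if 0	/* example only */
static int
example_read_block(
	struct xfs_buftarg		*target,
	xfs_daddr_t			daddr,
	size_t				numblks,
	const struct xfs_buf_ops	*ops)
{
	DEFINE_SINGLE_BUF_MAP(map, daddr, numblks);
	struct xfs_buf			*bp;

	/* without XBF_ASYNC this waits for I/O completion internally */
	bp = xfs_buf_read_map(target, &map, 1, 0, ops);
	if (!bp)
		return ENOMEM;

	if (bp->b_error) {	/* set by I/O completion or the verifier */
		int error = bp->b_error;

		xfs_buf_relse(bp);
		return error;
	}

	/* ... consume bp->b_addr ... */
	xfs_buf_relse(bp);
	return 0;
}
#endif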
/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = daddr;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfsbdstrat(target->bt_mount, bp);
	xfs_buf_iowait(bp);
	return bp;
}
/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}
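/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * wrapping caller-supplied memory in a buffer and later returning the
 * buffer to its empty state. The function and its arguments are
 * hypothetical; real callers (e.g. log recovery) manage both the buffer and
 * the memory lifetimes themselves.
 */
#if 0	/* example only */
static int
example_associate(
	struct xfs_buf	*bp,	/* e.g. from xfs_buf_get_uncached() */
	void		*mem,	/* caller-owned memory, any alignment */
	size_t		len)
{
	int		error;

	error = xfs_buf_associate_memory(bp, mem, len);
	if (error)
		return error;

	/* ... I/O or data movement on bp ... */

	xfs_buf_set_empty(bp, BTOBB(len));	/* detach before freeing mem */
	return 0;
}
#endif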
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer. If the hold count is 1, calls
 * xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
		    atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}
/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}
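/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the trylock/lock discipline the functions above provide. A hypothetical
 * caller that cannot afford to sleep tries the lock first and only falls
 * back to the blocking lock when sleeping is acceptable.
 */
#if 0	/* example only */
static void
example_lock_buffer(
	struct xfs_buf	*bp,
	bool		can_sleep)
{
	if (!xfs_buf_trylock(bp)) {
		if (!can_sleep)
			return;		/* caller retries later */
		xfs_buf_lock(bp);	/* may push the log for stale+pinned */
	}

	/* ... buffer is now owned by this thread ... */

	xfs_buf_unlock(bp);
}
#endif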
/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);
	bool			read = !!(bp->b_flags & XBF_READ);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/* only validate buffers that were read without errors */
	if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
		bp->b_ops->verify_read(bp);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else {
		ASSERT(read && bp->b_ops);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioend(
	struct xfs_buf	*bp,
	int		schedule)
{
	bool		read = !!(bp->b_flags & XBF_READ);

	trace_xfs_buf_iodone(bp, _RET_IP_);

	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}
/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t	*bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;

	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}

STATIC int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}
int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);

	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}
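/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the synchronous write pattern xfs_bwrite() supports. The caller must
 * already hold the buffer lock; on I/O error the mount is shut down by
 * xfs_bwrite() itself, so the caller only propagates the error code.
 */
#if 0	/* example only */
static int
example_sync_write(
	struct xfs_buf	*bp)	/* locked, contents already updated */
{
	int		error;

	error = xfs_bwrite(bp);	/* waits for I/O completion */
	xfs_buf_relse(bp);	/* unlock and drop our hold */
	return error;
}
#endif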
/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem. Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}
STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (!bp->b_error)
		xfs_buf_ioerror(bp, -error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}
static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		rw)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	total_nr_pages = bp->b_page_count;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_iorequest) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}
STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		rw;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}
void
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 1);

	xfs_buf_rele(bp);
}

/*
 * Waits for I/O to complete on the buffer supplied. It returns immediately if
 * no I/O is pending or there is already a pending error on the buffer. It
 * returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	if (!bp->b_error)
		wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
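/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * zeroing a byte range of a buffer with xfs_buf_iomove(), which works page
 * by page whether or not the buffer is mapped into one contiguous virtual
 * range. The function name and offsets are hypothetical.
 */
#if 0	/* example only */
static void
example_zero_range(
	struct xfs_buf	*bp,
	size_t		boff,
	size_t		len)
{
	/* XBRW_ZERO never dereferences the data pointer, so NULL is fine */
	xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);
}
#endif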
/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}
int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
		bp->b_lru_flags |= _XBF_LRU_DISPOSE;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}
void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, name);
		return EINVAL;
	}

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage. Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;

	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}
/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it was already on
 * the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get readded to a delwri list after the synchronous writeout, in
	 * which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

static int
__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,
	bool			wait)
{
	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;
	int			pinned = 0;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime. In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
	}

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
		bp->b_flags |= XBF_WRITE;

		if (!wait) {
			bp->b_flags |= XBF_ASYNC;
			list_del_init(&bp->b_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}
/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers. This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
}

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	/* Wait for IO to complete. */
	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);
		error2 = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}
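/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the local-list delwri pattern the queue/submit functions above are
 * designed for. The list is thread-local, so no extra locking is needed
 * around it; buffers are queued while locked, then unlocked, and the submit
 * path re-locks them. The function and its arguments are hypothetical.
 */
#if 0	/* example only */
static int
example_delwri(
	struct xfs_buf	*bp1,	/* locked */
	struct xfs_buf	*bp2)	/* locked */
{
	LIST_HEAD	(buffer_list);

	xfs_buf_delwri_queue(bp1, &buffer_list);
	xfs_buf_delwri_queue(bp2, &buffer_list);
	xfs_buf_unlock(bp1);	/* the queue holds its own reference */
	xfs_buf_unlock(bp2);

	/* write everything out and wait; consumes buffer_list */
	return xfs_buf_delwri_submit(&buffer_list);
}
#endif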
int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	return 0;

 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}