xfs_buf.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp))

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are no
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}
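
/*
 * Editorial sketch (not part of the original code): how the hold count and
 * the LRU reference interact over a typical cached-buffer lifetime. The
 * helpers named are real; the sequence is a hypothetical caller.
 *
 *	bp = xfs_buf_get(...);	caller's reference, b_hold == 1
 *	xfs_buf_rele(bp);	last caller reference: the buffer is parked
 *				on the LRU, which takes its own b_hold
 *	...			the shrinker later walks b_lru_ref down to
 *				zero and drops the LRU hold, freeing bp; or
 *				xfs_buf_stale() drops the LRU reference so
 *				the next xfs_buf_rele() frees it immediately
 */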

STATIC void
_xfs_buf_initialize(
	xfs_buf_t	*bp,
	xfs_buftarg_t	*target,
	xfs_off_t	range_base,
	size_t		range_length,
	xfs_buf_flags_t	flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t	*bp,
	int		page_count,
	xfs_buf_flags_t	flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t	*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_deallocate(bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t	*bp,
	uint		flags)
{
	size_t		size = bp->b_count_desired;
	size_t		nbytes, offset;
	gfp_t		gfp_mask = xb_to_gfp(flags);
	unsigned short	page_count, i;
	xfs_off_t	end;
	int		error;

	/*
	 * For buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
							PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);

	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t	*bp,
	uint		flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of a given target. The buffer is returned
 * locked. If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t	*btp,	/* block device target */
	xfs_off_t	ioff,	/* starting offset of range */
	size_t		isize,	/* length of range */
	xfs_buf_flags_t	flags,
	xfs_buf_t	*new_bp)
{
	xfs_off_t	range_base;
	size_t		range_length;
	struct xfs_perag *pag;
	struct rb_node	**rbp;
	struct rb_node	*parent;
	xfs_buf_t	*bp;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * Found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed, i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * If the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}
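
/*
 * Editorial sketch (illustrative, not in the original): the lookup-or-insert
 * pattern built on _xfs_buf_find(). The caller pre-allocates a buffer head,
 * then either inserts it into the rbtree or frees it on a cache hit:
 *
 *	new_bp = xfs_buf_allocate(flags);
 *	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
 *	if (bp != new_bp)		cache hit, new_bp unused
 *		xfs_buf_deallocate(new_bp);
 *
 * xfs_buf_get() below is the real caller of this pattern.
 */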

/*
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get(
	xfs_buftarg_t	*target,	/* target for buffer */
	xfs_off_t	ioff,		/* starting offset of range */
	size_t		isize,		/* length of range */
	xfs_buf_flags_t	flags)
{
	xfs_buf_t	*bp, *new_bp;
	int		error = 0;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = xfs_buf_allocate_memory(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
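
/*
 * Usage sketch (illustrative only; "mp", "blkno" and "numblks" are
 * hypothetical): get a locked, in-memory buffer with no read from disk,
 * initialise its contents, then unlock and release it:
 *
 *	bp = xfs_buf_get(mp->m_ddev_targp, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		memset(bp->b_addr, 0, bp->b_count_desired);
 *		xfs_buf_relse(bp);
 *	}
 */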

STATIC int
_xfs_buf_read(
	xfs_buf_t	*bp,
	xfs_buf_flags_t	flags)
{
	int		status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	status = xfs_buf_iorequest(bp);
	if (status || bp->b_error || (flags & XBF_ASYNC))
		return status;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read(
	xfs_buftarg_t	*target,
	xfs_off_t	ioff,
	size_t		isize,
	xfs_buf_flags_t	flags)
{
	xfs_buf_t	*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get(target, ioff, isize, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
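
/*
 * Sketch of a typical synchronous metadata read (illustrative; the caller
 * and block numbers are hypothetical). On return the buffer is locked and,
 * barring an I/O error recorded in b_error, contains the disk contents:
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		if (!bp->b_error)
 *			... inspect bp->b_addr ...
 *		xfs_buf_relse(bp);
 *	}
 */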

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t	*target,
	xfs_off_t	ioff,
	size_t		isize)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}
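
/*
 * Illustrative pattern (not from the original file; block numbers are
 * hypothetical): issue readahead for blocks that will probably be needed
 * soon, then do the blocking read when they actually are. The later read
 * finds the buffer already XBF_DONE in the cache if the readahead completed:
 *
 *	xfs_buf_readahead(target, blkno + numblks, numblks);
 *	...
 *	bp = xfs_buf_read(target, blkno + numblks, numblks, XBF_LOCK);
 */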

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount *mp,
	struct xfs_buftarg *target,
	xfs_daddr_t	daddr,
	size_t		length,
	int		flags)
{
	xfs_buf_t	*bp;
	int		error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);

	xfsbdstrat(mp, bp);
	error = xfs_buf_iowait(bp);
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t		len,
	xfs_buftarg_t	*target)
{
	xfs_buf_t	*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf	*bp,
	size_t		len)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_file_offset = 0;
	bp->b_buffer_length = bp->b_count_desired = len;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_flags &= ~XBF_MAPPED;
}

static inline struct page *
mem_to_page(
	void		*addr)
{
	if (!is_vmalloc_addr(addr))
		return virt_to_page(addr);
	else
		return vmalloc_to_page(addr);
}

int
xfs_buf_associate_memory(
	xfs_buf_t	*bp,
	void		*mem,
	size_t		len)
{
	int		rval;
	int		i = 0;
	unsigned long	pageaddr;
	unsigned long	offset;
	size_t		buflen;
	int		page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}
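
/*
 * Sketch (illustrative; "mem", "len" and "target" are a hypothetical
 * caller's): wrapping externally owned memory in a buffer so the normal
 * buffer I/O paths can operate on it:
 *
 *	bp = xfs_buf_get_empty(len, target);
 *	if (bp && xfs_buf_associate_memory(bp, mem, len) == 0)
 *		... issue I/O through bp as usual ...
 */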

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg *target,
	size_t		len,
	int		flags)
{
	unsigned long	page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int		error, i;
	xfs_buf_t	*bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
fail_free_buf:
	xfs_buf_deallocate(bp);
fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t	*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer. If the hold count is 1,
 * calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t	*bp)
{
	struct xfs_perag *pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
		    atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}
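
/*
 * Pairing sketch (illustrative): every explicit xfs_buf_hold() must be
 * balanced by an xfs_buf_rele(); the get/read interfaces take their own
 * reference, which xfs_buf_relse()/xfs_buf_rele() drops:
 *
 *	xfs_buf_hold(bp);	keep bp alive across an async completion
 *	... hand bp to another context ...
 *	xfs_buf_rele(bp);	that context drops the extra reference
 */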

/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf	*bp)
{
	int		locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly. We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	struct xfs_buf	*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}
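
/*
 * Locking sketch (illustrative): the buffer lock is a semaphore, so it can
 * be taken in one context and released in another (e.g. at I/O completion):
 *
 *	if (xfs_buf_trylock(bp)) {
 *		... modify buffer contents ...
 *		xfs_buf_unlock(bp);
 *	} else {
 *		xfs_buf_lock(bp);	blocking variant; may push the log
 *		...			first if the buffer is stale+pinned
 *		xfs_buf_unlock(bp);
 *	}
 */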

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t	*bp)
{
	DECLARE_WAITQUEUE(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t	*bp,
	int		schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t	*bp,
	int		error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

int
xfs_bwrite(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	int			error;

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

	xfs_buf_delwri_dequeue(bp);
	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	xfs_buf_relse(bp);
	return error;
}

void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	trace_xfs_buf_bdwrite(bp, _RET_IP_);

	bp->b_flags &= ~XBF_READ;
	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

	xfs_buf_delwri_queue(bp, 1);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t	*bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_STALE(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_STALE(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_FINISH_IOWAIT(bp);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shut down the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem. Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

STATIC void
_xfs_buf_ioend(
	xfs_buf_t	*bp,
	int		schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio	*bio,
	int		error)
{
	xfs_buf_t	*bp = (xfs_buf_t *)bio->bi_private;

	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t	*bp)
{
	int		rw, map_i, total_nr_pages, nr_pages;
	struct bio	*bio;
	int		offset = bp->b_offset;
	int		size = bp->b_count_desired;
	sector_t	sector = bp->b_bn;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t	*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t	*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}
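
/*
 * Sketch of the split submit/wait pattern (illustrative): issue the I/O
 * asynchronously, do other work, then block for completion. This is what
 * _xfs_buf_read() does internally for synchronous reads:
 *
 *	xfs_buf_iorequest(bp);
 *	... overlap other work ...
 *	error = xfs_buf_iowait(bp);
 */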

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t	*bp,
	size_t		offset)
{
	struct page	*page;

	if (bp->b_flags & XBF_MAPPED)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t	*bp,	/* buffer to process		*/
	size_t		boff,	/* starting buffer offset	*/
	size_t		bsize,	/* length to copy		*/
	void		*data,	/* data address			*/
	xfs_buf_rw_t	mode)	/* read/write/zero flag		*/
{
	size_t		bend, cpoff, csize;
	struct page	*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
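
/*
 * Usage sketch (illustrative; "validlen" is a hypothetical length of the
 * portion to preserve): zero the tail of a buffer without caring whether
 * it is vmapped or built from individual pages. The data pointer is unused
 * for XBRW_ZERO:
 *
 *	xfs_buf_iomove(bp, validlen, bp->b_count_desired - validlen,
 *		       NULL, XBRW_ZERO);
 */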

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

static int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kthread_stop(btp->bt_task);
	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t	*btp,
	unsigned int	blocksize,
	unsigned int	sectorsize,
	int		verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, xfs_buf_target_name(btp));
		return EINVAL;
	}

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so we don't know what size sectors
 * are being used at this early stage. Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t	*btp,
	unsigned int	blocksize,
	unsigned int	sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp,
	const char		*fsname)
{
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spin_lock_init(&btp->bt_delwrite_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp, fsname))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

/*
 * Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t	*bp,
	int		unlock)
{
	struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t	*dwlk = &bp->b_target->bt_delwrite_lock;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	if (list_empty(dwq)) {
		/* start xfsbufd as it is about to have something to do */
		wake_up_process(bp->b_target->bt_task);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t	*bp)
{
	spinlock_t	*dwlk = &bp->b_target->bt_delwrite_lock;
	int		dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwrite_lock);
	list_move(&bp->b_list, &btp->bt_delwrite_queue);
	spin_unlock(&btp->bt_delwrite_lock);
}
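
/*
 * Worked example of the promotion arithmetic above (illustrative numbers,
 * assuming the default xfs_buf_age_centisecs of 1500, i.e. 15 seconds):
 * age = 1500 * msecs_to_jiffies(10) + 1, just over 15 seconds of jiffies.
 * Setting b_queuetime = jiffies - age backdates the buffer, so the check in
 * xfs_buf_delwri_split(), time_before(jiffies, b_queuetime + age), is
 * guaranteed false and the buffer is pushed on the next xfsbufd run.
 */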

STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

STATIC int
xfsbufd(
	void		*data)
{
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
		struct list_head tmp;
		struct blk_plug plug;

		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwrite_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		list_sort(NULL, &tmp, xfs_buf_cmp);

		blk_start_plug(&plug);
		while (!list_empty(&tmp)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
			list_del_init(&bp->b_list);
			xfs_bdstrat_cb(bp);
		}
		blk_finish_plug(&plug);
	} while (!kthread_should_stop());

	return 0;
}

/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device. This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	xfs_buf_t	*bp;
	int		pincount = 0;
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);
	struct blk_plug plug;

	xfs_buf_runall_queues(xfsconvertd_workqueue);
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async and then if we need to wait for completion
	 * we do that after issuing all the IO.
	 */
	list_sort(NULL, &tmp_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
		ASSERT(target == bp->b_target);
		list_del_init(&bp->b_list);
		if (wait) {
			bp->b_flags &= ~XBF_ASYNC;
			list_add(&bp->b_list, &wait_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	if (wait) {
		/* Wait for IO to complete. */
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

			list_del_init(&bp->b_list);
			xfs_buf_iowait(bp);
			xfs_buf_relse(bp);
		}
	}

	return pincount;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
						WQ_MEM_RECLAIM, 1);
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfsconvertd_workqueue);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif