xfs_buf.c

  1. /*
  2. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include <linux/stddef.h>
  20. #include <linux/errno.h>
  21. #include <linux/gfp.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/init.h>
  24. #include <linux/vmalloc.h>
  25. #include <linux/bio.h>
  26. #include <linux/sysctl.h>
  27. #include <linux/proc_fs.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/percpu.h>
  30. #include <linux/blkdev.h>
  31. #include <linux/hash.h>
  32. #include <linux/kthread.h>
  33. #include <linux/migrate.h>
  34. #include <linux/backing-dev.h>
  35. #include <linux/freezer.h>
  36. #include "xfs_sb.h"
  37. #include "xfs_log.h"
  38. #include "xfs_ag.h"
  39. #include "xfs_mount.h"
  40. #include "xfs_trace.h"
  41. static kmem_zone_t *xfs_buf_zone;
  42. static struct workqueue_struct *xfslogd_workqueue;
  43. #ifdef XFS_BUF_LOCK_TRACKING
  44. # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
  45. # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
  46. # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
  47. #else
  48. # define XB_SET_OWNER(bp) do { } while (0)
  49. # define XB_CLEAR_OWNER(bp) do { } while (0)
  50. # define XB_GET_OWNER(bp) do { } while (0)
  51. #endif
  52. #define xb_to_gfp(flags) \
  53. ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
  54. static inline int
  55. xfs_buf_is_vmapped(
  56. struct xfs_buf *bp)
  57. {
  58. /*
  59. * Return true if the buffer is vmapped.
  60. *
  61. * b_addr is null if the buffer is not mapped, but the code is clever
  62. * enough to know it doesn't have to map a single page, so the check has
  63. * to be both for b_addr and bp->b_page_count > 1.
  64. */
  65. return bp->b_addr && bp->b_page_count > 1;
  66. }
  67. static inline int
  68. xfs_buf_vmap_len(
  69. struct xfs_buf *bp)
  70. {
  71. return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
  72. }
  73. /*
  74. * xfs_buf_lru_add - add a buffer to the LRU.
  75. *
  76. * The LRU takes a new reference to the buffer so that it will only be freed
  77. * once the shrinker takes the buffer off the LRU.
  78. */
  79. STATIC void
  80. xfs_buf_lru_add(
  81. struct xfs_buf *bp)
  82. {
  83. struct xfs_buftarg *btp = bp->b_target;
  84. spin_lock(&btp->bt_lru_lock);
  85. if (list_empty(&bp->b_lru)) {
  86. atomic_inc(&bp->b_hold);
  87. list_add_tail(&bp->b_lru, &btp->bt_lru);
  88. btp->bt_lru_nr++;
  89. bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
  90. }
  91. spin_unlock(&btp->bt_lru_lock);
  92. }
  93. /*
  94. * xfs_buf_lru_del - remove a buffer from the LRU
  95. *
  96. * The unlocked check is safe here because it only occurs when there are no
  97. * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
  98. * to optimise the shrinker removing the buffer from the LRU and calling
  99. * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
  100. * bt_lru_lock.
  101. */
  102. STATIC void
  103. xfs_buf_lru_del(
  104. struct xfs_buf *bp)
  105. {
  106. struct xfs_buftarg *btp = bp->b_target;
  107. if (list_empty(&bp->b_lru))
  108. return;
  109. spin_lock(&btp->bt_lru_lock);
  110. if (!list_empty(&bp->b_lru)) {
  111. list_del_init(&bp->b_lru);
  112. btp->bt_lru_nr--;
  113. }
  114. spin_unlock(&btp->bt_lru_lock);
  115. }
  116. /*
  117. * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  118. * b_lru_ref count so that the buffer is freed immediately when the buffer
  119. * reference count falls to zero. If the buffer is already on the LRU, we need
  120. * to remove the reference that LRU holds on the buffer.
  121. *
  122. * This prevents build-up of stale buffers on the LRU.
  123. */
  124. void
  125. xfs_buf_stale(
  126. struct xfs_buf *bp)
  127. {
  128. ASSERT(xfs_buf_islocked(bp));
  129. bp->b_flags |= XBF_STALE;
  130. /*
  131. * Clear the delwri status so that a delwri queue walker will not
  132. * flush this buffer to disk now that it is stale. The delwri queue has
  133. * a reference to the buffer, so this is safe to do.
  134. */
  135. bp->b_flags &= ~_XBF_DELWRI_Q;
  136. atomic_set(&(bp)->b_lru_ref, 0);
  137. if (!list_empty(&bp->b_lru)) {
  138. struct xfs_buftarg *btp = bp->b_target;
  139. spin_lock(&btp->bt_lru_lock);
  140. if (!list_empty(&bp->b_lru) &&
  141. !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
  142. list_del_init(&bp->b_lru);
  143. btp->bt_lru_nr--;
  144. atomic_dec(&bp->b_hold);
  145. }
  146. spin_unlock(&btp->bt_lru_lock);
  147. }
  148. ASSERT(atomic_read(&bp->b_hold) >= 1);
  149. }
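/*
 * Set up the block map array for a buffer. The common single-map case uses
 * the map embedded in the buffer itself; multi-map buffers get a separately
 * allocated array that xfs_buf_free_maps() releases later.
 */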
  150. static int
  151. xfs_buf_get_maps(
  152. struct xfs_buf *bp,
  153. int map_count)
  154. {
  155. ASSERT(bp->b_maps == NULL);
  156. bp->b_map_count = map_count;
  157. if (map_count == 1) {
  158. bp->b_maps = &bp->b_map;
  159. return 0;
  160. }
  161. bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
  162. KM_NOFS);
  163. if (!bp->b_maps)
  164. return ENOMEM;
  165. return 0;
  166. }
  167. /*
  168. * Frees b_maps if it was allocated.
  169. */
  170. static void
  171. xfs_buf_free_maps(
  172. struct xfs_buf *bp)
  173. {
  174. if (bp->b_maps != &bp->b_map) {
  175. kmem_free(bp->b_maps);
  176. bp->b_maps = NULL;
  177. }
  178. }
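/*
 * Allocate and initialise a struct xfs_buf from the buffer zone. The buffer
 * is returned with a single hold and its semaphore held (i.e. locked), but
 * with no data pages attached yet.
 */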
  179. struct xfs_buf *
  180. _xfs_buf_alloc(
  181. struct xfs_buftarg *target,
  182. struct xfs_buf_map *map,
  183. int nmaps,
  184. xfs_buf_flags_t flags)
  185. {
  186. struct xfs_buf *bp;
  187. int error;
  188. int i;
  189. bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
  190. if (unlikely(!bp))
  191. return NULL;
  192. /*
  193. * We don't want certain flags to appear in b_flags unless they are
  194. * specifically set by later operations on the buffer.
  195. */
  196. flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
  197. atomic_set(&bp->b_hold, 1);
  198. atomic_set(&bp->b_lru_ref, 1);
  199. init_completion(&bp->b_iowait);
  200. INIT_LIST_HEAD(&bp->b_lru);
  201. INIT_LIST_HEAD(&bp->b_list);
  202. RB_CLEAR_NODE(&bp->b_rbnode);
  203. sema_init(&bp->b_sema, 0); /* held, no waiters */
  204. XB_SET_OWNER(bp);
  205. bp->b_target = target;
  206. bp->b_flags = flags;
  207. /*
  208. * Set length and io_length to the same value initially.
  209. * I/O routines should use io_length, which will be the same in
  210. * most cases but may be reset (e.g. XFS recovery).
  211. */
  212. error = xfs_buf_get_maps(bp, nmaps);
  213. if (error) {
  214. kmem_zone_free(xfs_buf_zone, bp);
  215. return NULL;
  216. }
  217. bp->b_bn = map[0].bm_bn;
  218. bp->b_length = 0;
  219. for (i = 0; i < nmaps; i++) {
  220. bp->b_maps[i].bm_bn = map[i].bm_bn;
  221. bp->b_maps[i].bm_len = map[i].bm_len;
  222. bp->b_length += map[i].bm_len;
  223. }
  224. bp->b_io_length = bp->b_length;
  225. atomic_set(&bp->b_pin_count, 0);
  226. init_waitqueue_head(&bp->b_waiters);
  227. XFS_STATS_INC(xb_create);
  228. trace_xfs_buf_init(bp, _RET_IP_);
  229. return bp;
  230. }
  231. /*
  232. * Allocate a page array capable of holding a specified number
  233. * of pages, and point the page buf at it.
  234. */
  235. STATIC int
  236. _xfs_buf_get_pages(
  237. xfs_buf_t *bp,
  238. int page_count,
  239. xfs_buf_flags_t flags)
  240. {
  241. /* Make sure that we have a page list */
  242. if (bp->b_pages == NULL) {
  243. bp->b_page_count = page_count;
  244. if (page_count <= XB_PAGES) {
  245. bp->b_pages = bp->b_page_array;
  246. } else {
  247. bp->b_pages = kmem_alloc(sizeof(struct page *) *
  248. page_count, KM_NOFS);
  249. if (bp->b_pages == NULL)
  250. return -ENOMEM;
  251. }
  252. memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
  253. }
  254. return 0;
  255. }
  256. /*
  257. * Frees b_pages if it was allocated.
  258. */
  259. STATIC void
  260. _xfs_buf_free_pages(
  261. xfs_buf_t *bp)
  262. {
  263. if (bp->b_pages != bp->b_page_array) {
  264. kmem_free(bp->b_pages);
  265. bp->b_pages = NULL;
  266. }
  267. }
  268. /*
  269. * Releases the specified buffer.
  270. *
  271. * The modification state of any associated pages is left unchanged.
  273. * The buffer must not be on any hash - use xfs_buf_rele instead for
  273. * hashed and refcounted buffers
  274. */
  275. void
  276. xfs_buf_free(
  277. xfs_buf_t *bp)
  278. {
  279. trace_xfs_buf_free(bp, _RET_IP_);
  280. ASSERT(list_empty(&bp->b_lru));
  281. if (bp->b_flags & _XBF_PAGES) {
  282. uint i;
  283. if (xfs_buf_is_vmapped(bp))
  284. vm_unmap_ram(bp->b_addr - bp->b_offset,
  285. bp->b_page_count);
  286. for (i = 0; i < bp->b_page_count; i++) {
  287. struct page *page = bp->b_pages[i];
  288. __free_page(page);
  289. }
  290. } else if (bp->b_flags & _XBF_KMEM)
  291. kmem_free(bp->b_addr);
  292. _xfs_buf_free_pages(bp);
  293. xfs_buf_free_maps(bp);
  294. kmem_zone_free(xfs_buf_zone, bp);
  295. }
  296. /*
  297. * Allocates all the pages for the buffer in question and builds its page list.
  298. */
  299. STATIC int
  300. xfs_buf_allocate_memory(
  301. xfs_buf_t *bp,
  302. uint flags)
  303. {
  304. size_t size;
  305. size_t nbytes, offset;
  306. gfp_t gfp_mask = xb_to_gfp(flags);
  307. unsigned short page_count, i;
  308. xfs_off_t start, end;
  309. int error;
  310. /*
  311. * for buffers that are contained within a single page, just allocate
  312. * the memory from the heap - there's no need for the complexity of
  313. * page arrays to keep allocation down to order 0.
  314. */
  315. size = BBTOB(bp->b_length);
  316. if (size < PAGE_SIZE) {
  317. bp->b_addr = kmem_alloc(size, KM_NOFS);
  318. if (!bp->b_addr) {
  319. /* low memory - use alloc_page loop instead */
  320. goto use_alloc_page;
  321. }
  322. if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
  323. ((unsigned long)bp->b_addr & PAGE_MASK)) {
  324. /* b_addr spans two pages - use alloc_page instead */
  325. kmem_free(bp->b_addr);
  326. bp->b_addr = NULL;
  327. goto use_alloc_page;
  328. }
  329. bp->b_offset = offset_in_page(bp->b_addr);
  330. bp->b_pages = bp->b_page_array;
  331. bp->b_pages[0] = virt_to_page(bp->b_addr);
  332. bp->b_page_count = 1;
  333. bp->b_flags |= _XBF_KMEM;
  334. return 0;
  335. }
  336. use_alloc_page:
  337. start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
  338. end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
  339. >> PAGE_SHIFT;
  340. page_count = end - start;
  341. error = _xfs_buf_get_pages(bp, page_count, flags);
  342. if (unlikely(error))
  343. return error;
  344. offset = bp->b_offset;
  345. bp->b_flags |= _XBF_PAGES;
  346. for (i = 0; i < bp->b_page_count; i++) {
  347. struct page *page;
  348. uint retries = 0;
  349. retry:
  350. page = alloc_page(gfp_mask);
  351. if (unlikely(page == NULL)) {
  352. if (flags & XBF_READ_AHEAD) {
  353. bp->b_page_count = i;
  354. error = ENOMEM;
  355. goto out_free_pages;
  356. }
  357. /*
  358. * This could deadlock.
  359. *
  360. * But until all the XFS lowlevel code is revamped to
  361. * handle buffer allocation failures we can't do much.
  362. */
  363. if (!(++retries % 100))
  364. xfs_err(NULL,
  365. "possible memory allocation deadlock in %s (mode:0x%x)",
  366. __func__, gfp_mask);
  367. XFS_STATS_INC(xb_page_retries);
  368. congestion_wait(BLK_RW_ASYNC, HZ/50);
  369. goto retry;
  370. }
  371. XFS_STATS_INC(xb_page_found);
  372. nbytes = min_t(size_t, size, PAGE_SIZE - offset);
  373. size -= nbytes;
  374. bp->b_pages[i] = page;
  375. offset = 0;
  376. }
  377. return 0;
  378. out_free_pages:
  379. for (i = 0; i < bp->b_page_count; i++)
  380. __free_page(bp->b_pages[i]);
  381. return error;
  382. }
  383. /*
  384. * Map buffer into kernel address-space if necessary.
  385. */
  386. STATIC int
  387. _xfs_buf_map_pages(
  388. xfs_buf_t *bp,
  389. uint flags)
  390. {
  391. ASSERT(bp->b_flags & _XBF_PAGES);
  392. if (bp->b_page_count == 1) {
  393. /* A single page buffer is always mappable */
  394. bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
  395. } else if (flags & XBF_UNMAPPED) {
  396. bp->b_addr = NULL;
  397. } else {
  398. int retried = 0;
  399. do {
  400. bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
  401. -1, PAGE_KERNEL);
  402. if (bp->b_addr)
  403. break;
  404. vm_unmap_aliases();
  405. } while (retried++ <= 1);
  406. if (!bp->b_addr)
  407. return -ENOMEM;
  408. bp->b_addr += bp->b_offset;
  409. }
  410. return 0;
  411. }
  412. /*
  413. * Finding and Reading Buffers
  414. */
  415. /*
  416. * Looks up, and creates if absent, a lockable buffer for
  417. * a given range of an inode. The buffer is returned
  418. * locked. No I/O is implied by this call.
  419. */
  420. xfs_buf_t *
  421. _xfs_buf_find(
  422. struct xfs_buftarg *btp,
  423. struct xfs_buf_map *map,
  424. int nmaps,
  425. xfs_buf_flags_t flags,
  426. xfs_buf_t *new_bp)
  427. {
  428. size_t numbytes;
  429. struct xfs_perag *pag;
  430. struct rb_node **rbp;
  431. struct rb_node *parent;
  432. xfs_buf_t *bp;
  433. xfs_daddr_t blkno = map[0].bm_bn;
  434. int numblks = 0;
  435. int i;
  436. for (i = 0; i < nmaps; i++)
  437. numblks += map[i].bm_len;
  438. numbytes = BBTOB(numblks);
  439. /* Check for IOs smaller than the sector size / not sector aligned */
  440. ASSERT(!(numbytes < (1 << btp->bt_sshift)));
  441. ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
  442. /* get tree root */
  443. pag = xfs_perag_get(btp->bt_mount,
  444. xfs_daddr_to_agno(btp->bt_mount, blkno));
  445. /* walk tree */
  446. spin_lock(&pag->pag_buf_lock);
  447. rbp = &pag->pag_buf_tree.rb_node;
  448. parent = NULL;
  449. bp = NULL;
  450. while (*rbp) {
  451. parent = *rbp;
  452. bp = rb_entry(parent, struct xfs_buf, b_rbnode);
  453. if (blkno < bp->b_bn)
  454. rbp = &(*rbp)->rb_left;
  455. else if (blkno > bp->b_bn)
  456. rbp = &(*rbp)->rb_right;
  457. else {
  458. /*
  459. * found a block number match. If the range doesn't
  460. * match, the only way this is allowed is if the buffer
  461. * in the cache is stale and the transaction that made
  462. * it stale has not yet committed. i.e. we are
  463. * reallocating a busy extent. Skip this buffer and
  464. * continue searching to the right for an exact match.
  465. */
  466. if (bp->b_length != numblks) {
  467. ASSERT(bp->b_flags & XBF_STALE);
  468. rbp = &(*rbp)->rb_right;
  469. continue;
  470. }
  471. atomic_inc(&bp->b_hold);
  472. goto found;
  473. }
  474. }
  475. /* No match found */
  476. if (new_bp) {
  477. rb_link_node(&new_bp->b_rbnode, parent, rbp);
  478. rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
  479. /* the buffer keeps the perag reference until it is freed */
  480. new_bp->b_pag = pag;
  481. spin_unlock(&pag->pag_buf_lock);
  482. } else {
  483. XFS_STATS_INC(xb_miss_locked);
  484. spin_unlock(&pag->pag_buf_lock);
  485. xfs_perag_put(pag);
  486. }
  487. return new_bp;
  488. found:
  489. spin_unlock(&pag->pag_buf_lock);
  490. xfs_perag_put(pag);
  491. if (!xfs_buf_trylock(bp)) {
  492. if (flags & XBF_TRYLOCK) {
  493. xfs_buf_rele(bp);
  494. XFS_STATS_INC(xb_busy_locked);
  495. return NULL;
  496. }
  497. xfs_buf_lock(bp);
  498. XFS_STATS_INC(xb_get_locked_waited);
  499. }
  500. /*
  501. * if the buffer is stale, clear all the external state associated with
  502. * it. We need to keep flags such as how we allocated the buffer memory
  503. * intact here.
  504. */
  505. if (bp->b_flags & XBF_STALE) {
  506. ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
  507. bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
  508. }
  509. trace_xfs_buf_find(bp, flags, _RET_IP_);
  510. XFS_STATS_INC(xb_get_locked);
  511. return bp;
  512. }
  513. /*
  514. * Assembles a buffer covering the specified range. The code is optimised for
  515. * cache hits, as metadata intensive workloads will see 3 orders of magnitude
  516. * more hits than misses.
  517. */
  518. struct xfs_buf *
  519. xfs_buf_get_map(
  520. struct xfs_buftarg *target,
  521. struct xfs_buf_map *map,
  522. int nmaps,
  523. xfs_buf_flags_t flags)
  524. {
  525. struct xfs_buf *bp;
  526. struct xfs_buf *new_bp;
  527. int error = 0;
  528. bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
  529. if (likely(bp))
  530. goto found;
  531. new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
  532. if (unlikely(!new_bp))
  533. return NULL;
  534. error = xfs_buf_allocate_memory(new_bp, flags);
  535. if (error) {
  536. xfs_buf_free(new_bp);
  537. return NULL;
  538. }
  539. bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
  540. if (!bp) {
  541. xfs_buf_free(new_bp);
  542. return NULL;
  543. }
  544. if (bp != new_bp)
  545. xfs_buf_free(new_bp);
  546. found:
  547. if (!bp->b_addr) {
  548. error = _xfs_buf_map_pages(bp, flags);
  549. if (unlikely(error)) {
  550. xfs_warn(target->bt_mount,
  551. "%s: failed to map pages\n", __func__);
  552. xfs_buf_relse(bp);
  553. return NULL;
  554. }
  555. }
  556. XFS_STATS_INC(xb_get);
  557. trace_xfs_buf_get(bp, flags, _RET_IP_);
  558. return bp;
  559. }
  560. STATIC int
  561. _xfs_buf_read(
  562. xfs_buf_t *bp,
  563. xfs_buf_flags_t flags)
  564. {
  565. ASSERT(!(flags & XBF_WRITE));
  566. ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
  567. bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
  568. bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
  569. xfs_buf_iorequest(bp);
  570. if (flags & XBF_ASYNC)
  571. return 0;
  572. return xfs_buf_iowait(bp);
  573. }
  574. xfs_buf_t *
  575. xfs_buf_read_map(
  576. struct xfs_buftarg *target,
  577. struct xfs_buf_map *map,
  578. int nmaps,
  579. xfs_buf_flags_t flags,
  580. xfs_buf_iodone_t verify)
  581. {
  582. struct xfs_buf *bp;
  583. flags |= XBF_READ;
  584. bp = xfs_buf_get_map(target, map, nmaps, flags);
  585. if (bp) {
  586. trace_xfs_buf_read(bp, flags, _RET_IP_);
  587. if (!XFS_BUF_ISDONE(bp)) {
  588. XFS_STATS_INC(xb_get_read);
  589. bp->b_iodone = verify;
  590. _xfs_buf_read(bp, flags);
  591. } else if (flags & XBF_ASYNC) {
  592. /*
  593. * Read ahead call which is already satisfied,
  594. * drop the buffer
  595. */
  596. xfs_buf_relse(bp);
  597. return NULL;
  598. } else {
  599. /* We do not want read in the flags */
  600. bp->b_flags &= ~XBF_READ;
  601. }
  602. }
  603. return bp;
  604. }
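/*
 * A minimal sketch of reading a single-extent buffer through the map
 * interface (the names blkno/numblks below are illustrative only, and a real
 * caller should also check bp->b_error before trusting the contents):
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read_map(target, &map, 1, 0, NULL);
 *	if (!bp)
 *		return ENOMEM;
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */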
  605. /*
  606. * If we are not low on memory then do the readahead in a deadlock
  607. * safe manner.
  608. */
  609. void
  610. xfs_buf_readahead_map(
  611. struct xfs_buftarg *target,
  612. struct xfs_buf_map *map,
  613. int nmaps,
  614. xfs_buf_iodone_t verify)
  615. {
  616. if (bdi_read_congested(target->bt_bdi))
  617. return;
  618. xfs_buf_read_map(target, map, nmaps,
  619. XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, verify);
  620. }
  621. /*
  622. * Read an uncached buffer from disk. Allocates and returns a locked
  623. * buffer containing the disk contents or nothing.
  624. */
  625. struct xfs_buf *
  626. xfs_buf_read_uncached(
  627. struct xfs_buftarg *target,
  628. xfs_daddr_t daddr,
  629. size_t numblks,
  630. int flags,
  631. xfs_buf_iodone_t verify)
  632. {
  633. xfs_buf_t *bp;
  634. int error;
  635. bp = xfs_buf_get_uncached(target, numblks, flags);
  636. if (!bp)
  637. return NULL;
  638. /* set up the buffer for a read IO */
  639. ASSERT(bp->b_map_count == 1);
  640. bp->b_bn = daddr;
  641. bp->b_maps[0].bm_bn = daddr;
  642. bp->b_flags |= XBF_READ;
  643. bp->b_iodone = verify;
  644. xfsbdstrat(target->bt_mount, bp);
  645. error = xfs_buf_iowait(bp);
  646. if (error) {
  647. xfs_buf_relse(bp);
  648. return NULL;
  649. }
  650. return bp;
  651. }
  652. /*
  653. * Return a buffer allocated as an empty buffer and associated to external
  654. * memory via xfs_buf_associate_memory() back to its empty state.
  655. */
  656. void
  657. xfs_buf_set_empty(
  658. struct xfs_buf *bp,
  659. size_t numblks)
  660. {
  661. if (bp->b_pages)
  662. _xfs_buf_free_pages(bp);
  663. bp->b_pages = NULL;
  664. bp->b_page_count = 0;
  665. bp->b_addr = NULL;
  666. bp->b_length = numblks;
  667. bp->b_io_length = numblks;
  668. ASSERT(bp->b_map_count == 1);
  669. bp->b_bn = XFS_BUF_DADDR_NULL;
  670. bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
  671. bp->b_maps[0].bm_len = bp->b_length;
  672. }
  673. static inline struct page *
  674. mem_to_page(
  675. void *addr)
  676. {
  677. if ((!is_vmalloc_addr(addr))) {
  678. return virt_to_page(addr);
  679. } else {
  680. return vmalloc_to_page(addr);
  681. }
  682. }
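/*
 * Attach caller-supplied memory to a buffer in place of allocated pages.
 * The region is translated into a page list so the normal I/O paths can use
 * it; b_length covers the page-aligned region while b_io_length covers only
 * the bytes the caller handed in.
 */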
  683. int
  684. xfs_buf_associate_memory(
  685. xfs_buf_t *bp,
  686. void *mem,
  687. size_t len)
  688. {
  689. int rval;
  690. int i = 0;
  691. unsigned long pageaddr;
  692. unsigned long offset;
  693. size_t buflen;
  694. int page_count;
  695. pageaddr = (unsigned long)mem & PAGE_MASK;
  696. offset = (unsigned long)mem - pageaddr;
  697. buflen = PAGE_ALIGN(len + offset);
  698. page_count = buflen >> PAGE_SHIFT;
  699. /* Free any previous set of page pointers */
  700. if (bp->b_pages)
  701. _xfs_buf_free_pages(bp);
  702. bp->b_pages = NULL;
  703. bp->b_addr = mem;
  704. rval = _xfs_buf_get_pages(bp, page_count, 0);
  705. if (rval)
  706. return rval;
  707. bp->b_offset = offset;
  708. for (i = 0; i < bp->b_page_count; i++) {
  709. bp->b_pages[i] = mem_to_page((void *)pageaddr);
  710. pageaddr += PAGE_SIZE;
  711. }
  712. bp->b_io_length = BTOBB(len);
  713. bp->b_length = BTOBB(buflen);
  714. return 0;
  715. }
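/*
 * Allocate a fully set up buffer that is not inserted into the per-AG buffer
 * cache. The buffer has no disk address (XFS_BUF_DADDR_NULL) until the
 * caller assigns one, and is backed by freshly allocated, mapped pages.
 */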
  716. xfs_buf_t *
  717. xfs_buf_get_uncached(
  718. struct xfs_buftarg *target,
  719. size_t numblks,
  720. int flags)
  721. {
  722. unsigned long page_count;
  723. int error, i;
  724. struct xfs_buf *bp;
  725. DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
  726. bp = _xfs_buf_alloc(target, &map, 1, 0);
  727. if (unlikely(bp == NULL))
  728. goto fail;
  729. page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
  730. error = _xfs_buf_get_pages(bp, page_count, 0);
  731. if (error)
  732. goto fail_free_buf;
  733. for (i = 0; i < page_count; i++) {
  734. bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
  735. if (!bp->b_pages[i])
  736. goto fail_free_mem;
  737. }
  738. bp->b_flags |= _XBF_PAGES;
  739. error = _xfs_buf_map_pages(bp, 0);
  740. if (unlikely(error)) {
  741. xfs_warn(target->bt_mount,
  742. "%s: failed to map pages\n", __func__);
  743. goto fail_free_mem;
  744. }
  745. trace_xfs_buf_get_uncached(bp, _RET_IP_);
  746. return bp;
  747. fail_free_mem:
  748. while (--i >= 0)
  749. __free_page(bp->b_pages[i]);
  750. _xfs_buf_free_pages(bp);
  751. fail_free_buf:
  752. xfs_buf_free_maps(bp);
  753. kmem_zone_free(xfs_buf_zone, bp);
  754. fail:
  755. return NULL;
  756. }
  757. /*
  758. * Increment reference count on buffer, to hold the buffer concurrently
  759. * with another thread which may release (free) the buffer asynchronously.
  760. * Must hold the buffer already to call this function.
  761. */
  762. void
  763. xfs_buf_hold(
  764. xfs_buf_t *bp)
  765. {
  766. trace_xfs_buf_hold(bp, _RET_IP_);
  767. atomic_inc(&bp->b_hold);
  768. }
  769. /*
  770. * Releases a hold on the specified buffer. If the
  771. * hold count is 1, calls xfs_buf_free.
  772. */
  773. void
  774. xfs_buf_rele(
  775. xfs_buf_t *bp)
  776. {
  777. struct xfs_perag *pag = bp->b_pag;
  778. trace_xfs_buf_rele(bp, _RET_IP_);
  779. if (!pag) {
  780. ASSERT(list_empty(&bp->b_lru));
  781. ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
  782. if (atomic_dec_and_test(&bp->b_hold))
  783. xfs_buf_free(bp);
  784. return;
  785. }
  786. ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
  787. ASSERT(atomic_read(&bp->b_hold) > 0);
  788. if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
  789. if (!(bp->b_flags & XBF_STALE) &&
  790. atomic_read(&bp->b_lru_ref)) {
  791. xfs_buf_lru_add(bp);
  792. spin_unlock(&pag->pag_buf_lock);
  793. } else {
  794. xfs_buf_lru_del(bp);
  795. ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
  796. rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
  797. spin_unlock(&pag->pag_buf_lock);
  798. xfs_perag_put(pag);
  799. xfs_buf_free(bp);
  800. }
  801. }
  802. }
  803. /*
  804. * Lock a buffer object, if it is not already locked.
  805. *
  806. * If we come across a stale, pinned, locked buffer, we know that we are
  807. * being asked to lock a buffer that has been reallocated. Because it is
  808. * pinned, we know that the log has not been pushed to disk and hence it
  809. * will still be locked. Rather than continuing to have trylock attempts
  810. * fail until someone else pushes the log, push it ourselves before
  811. * returning. This means that the xfsaild will not get stuck trying
  812. * to push on stale inode buffers.
  813. */
  814. int
  815. xfs_buf_trylock(
  816. struct xfs_buf *bp)
  817. {
  818. int locked;
  819. locked = down_trylock(&bp->b_sema) == 0;
  820. if (locked)
  821. XB_SET_OWNER(bp);
  822. else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
  823. xfs_log_force(bp->b_target->bt_mount, 0);
  824. trace_xfs_buf_trylock(bp, _RET_IP_);
  825. return locked;
  826. }
  827. /*
  828. * Lock a buffer object.
  829. *
  830. * If we come across a stale, pinned, locked buffer, we know that we
  831. * are being asked to lock a buffer that has been reallocated. Because
  832. * it is pinned, we know that the log has not been pushed to disk and
  833. * hence it will still be locked. Rather than sleeping until someone
  834. * else pushes the log, push it ourselves before trying to get the lock.
  835. */
  836. void
  837. xfs_buf_lock(
  838. struct xfs_buf *bp)
  839. {
  840. trace_xfs_buf_lock(bp, _RET_IP_);
  841. if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
  842. xfs_log_force(bp->b_target->bt_mount, 0);
  843. down(&bp->b_sema);
  844. XB_SET_OWNER(bp);
  845. trace_xfs_buf_lock_done(bp, _RET_IP_);
  846. }
  847. void
  848. xfs_buf_unlock(
  849. struct xfs_buf *bp)
  850. {
  851. XB_CLEAR_OWNER(bp);
  852. up(&bp->b_sema);
  853. trace_xfs_buf_unlock(bp, _RET_IP_);
  854. }
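/*
 * Wait for the buffer's pin count to drop to zero before issuing write I/O,
 * sleeping uninterruptibly on b_waiters until the last unpin wakes us.
 */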
  855. STATIC void
  856. xfs_buf_wait_unpin(
  857. xfs_buf_t *bp)
  858. {
  859. DECLARE_WAITQUEUE (wait, current);
  860. if (atomic_read(&bp->b_pin_count) == 0)
  861. return;
  862. add_wait_queue(&bp->b_waiters, &wait);
  863. for (;;) {
  864. set_current_state(TASK_UNINTERRUPTIBLE);
  865. if (atomic_read(&bp->b_pin_count) == 0)
  866. break;
  867. io_schedule();
  868. }
  869. remove_wait_queue(&bp->b_waiters, &wait);
  870. set_current_state(TASK_RUNNING);
  871. }
  872. /*
  873. * Buffer Utility Routines
  874. */
  875. STATIC void
  876. xfs_buf_iodone_work(
  877. struct work_struct *work)
  878. {
  879. xfs_buf_t *bp =
  880. container_of(work, xfs_buf_t, b_iodone_work);
  881. if (bp->b_iodone)
  882. (*(bp->b_iodone))(bp);
  883. else if (bp->b_flags & XBF_ASYNC)
  884. xfs_buf_relse(bp);
  885. }
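/*
 * I/O completion processing: clear the in-flight flags, mark the buffer done
 * if no error was recorded, then either run the b_iodone callback (deferred
 * to the xfslogd workqueue when @schedule is set, as it is from bio
 * completion context), release an async buffer, or wake a synchronous waiter
 * through b_iowait.
 */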
  886. void
  887. xfs_buf_ioend(
  888. xfs_buf_t *bp,
  889. int schedule)
  890. {
  891. trace_xfs_buf_iodone(bp, _RET_IP_);
  892. bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
  893. if (bp->b_error == 0)
  894. bp->b_flags |= XBF_DONE;
  895. if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
  896. if (schedule) {
  897. INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
  898. queue_work(xfslogd_workqueue, &bp->b_iodone_work);
  899. } else {
  900. xfs_buf_iodone_work(&bp->b_iodone_work);
  901. }
  902. } else {
  903. complete(&bp->b_iowait);
  904. }
  905. }
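/*
 * Record an error against the buffer; b_error holds a positive errno value.
 */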
  906. void
  907. xfs_buf_ioerror(
  908. xfs_buf_t *bp,
  909. int error)
  910. {
  911. ASSERT(error >= 0 && error <= 0xffff);
  912. bp->b_error = (unsigned short)error;
  913. trace_xfs_buf_ioerror(bp, error, _RET_IP_);
  914. }
  915. void
  916. xfs_buf_ioerror_alert(
  917. struct xfs_buf *bp,
  918. const char *func)
  919. {
  920. xfs_alert(bp->b_target->bt_mount,
  921. "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
  922. (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
  923. }
  924. /*
  925. * Called when we want to stop a buffer from getting written or read.
  926. * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
  927. * so that the proper iodone callbacks get called.
  928. */
  929. STATIC int
  930. xfs_bioerror(
  931. xfs_buf_t *bp)
  932. {
  933. #ifdef XFSERRORDEBUG
  934. ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
  935. #endif
  936. /*
  937. * No need to wait until the buffer is unpinned, we aren't flushing it.
  938. */
  939. xfs_buf_ioerror(bp, EIO);
  940. /*
  941. * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
  942. */
  943. XFS_BUF_UNREAD(bp);
  944. XFS_BUF_UNDONE(bp);
  945. xfs_buf_stale(bp);
  946. xfs_buf_ioend(bp, 0);
  947. return EIO;
  948. }
  949. /*
  950. * Same as xfs_bioerror, except that we are releasing the buffer
  951. * here ourselves, and avoiding the xfs_buf_ioend call.
  952. * This is meant for userdata errors; metadata bufs come with
  953. * iodone functions attached, so that we can track down errors.
  954. */
  955. STATIC int
  956. xfs_bioerror_relse(
  957. struct xfs_buf *bp)
  958. {
  959. int64_t fl = bp->b_flags;
  960. /*
  961. * No need to wait until the buffer is unpinned.
  962. * We aren't flushing it.
  963. *
  964. * chunkhold expects B_DONE to be set, whether
  965. * we actually finish the I/O or not. We don't want to
  966. * change that interface.
  967. */
  968. XFS_BUF_UNREAD(bp);
  969. XFS_BUF_DONE(bp);
  970. xfs_buf_stale(bp);
  971. bp->b_iodone = NULL;
  972. if (!(fl & XBF_ASYNC)) {
  973. /*
  974. * Mark b_error and B_ERROR _both_.
  975. * Lots of chunkcache code assumes that.
  976. * There's no reason to mark error for
  977. * ASYNC buffers.
  978. */
  979. xfs_buf_ioerror(bp, EIO);
  980. complete(&bp->b_iowait);
  981. } else {
  982. xfs_buf_relse(bp);
  983. }
  984. return EIO;
  985. }
  986. STATIC int
  987. xfs_bdstrat_cb(
  988. struct xfs_buf *bp)
  989. {
  990. if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
  991. trace_xfs_bdstrat_shut(bp, _RET_IP_);
  992. /*
  993. * Metadata write that didn't get logged but
  994. * written delayed anyway. These aren't associated
  995. * with a transaction, and can be ignored.
  996. */
  997. if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
  998. return xfs_bioerror_relse(bp);
  999. else
  1000. return xfs_bioerror(bp);
  1001. }
  1002. xfs_buf_iorequest(bp);
  1003. return 0;
  1004. }
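/*
 * Write a locked buffer synchronously and wait for completion. The delwri
 * queue flag is cleared so a concurrent delwri walker skips the buffer, and
 * any I/O error triggers a SHUTDOWN_META_IO_ERROR filesystem shutdown.
 */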
  1005. int
  1006. xfs_bwrite(
  1007. struct xfs_buf *bp)
  1008. {
  1009. int error;
  1010. ASSERT(xfs_buf_islocked(bp));
  1011. bp->b_flags |= XBF_WRITE;
  1012. bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
  1013. xfs_bdstrat_cb(bp);
  1014. error = xfs_buf_iowait(bp);
  1015. if (error) {
  1016. xfs_force_shutdown(bp->b_target->bt_mount,
  1017. SHUTDOWN_META_IO_ERROR);
  1018. }
  1019. return error;
  1020. }
  1021. /*
  1022. * Wrapper around bdstrat so that we can stop data from going to disk in case
  1023. * we are shutting down the filesystem. Typically user data goes thru this
  1024. * path; one of the exceptions is the superblock.
  1025. */
  1026. void
  1027. xfsbdstrat(
  1028. struct xfs_mount *mp,
  1029. struct xfs_buf *bp)
  1030. {
  1031. if (XFS_FORCED_SHUTDOWN(mp)) {
  1032. trace_xfs_bdstrat_shut(bp, _RET_IP_);
  1033. xfs_bioerror_relse(bp);
  1034. return;
  1035. }
  1036. xfs_buf_iorequest(bp);
  1037. }
  1038. STATIC void
  1039. _xfs_buf_ioend(
  1040. xfs_buf_t *bp,
  1041. int schedule)
  1042. {
  1043. if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
  1044. xfs_buf_ioend(bp, schedule);
  1045. }
  1046. STATIC void
  1047. xfs_buf_bio_end_io(
  1048. struct bio *bio,
  1049. int error)
  1050. {
  1051. xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
  1052. /*
  1053. * don't overwrite existing errors - otherwise we can lose errors on
  1054. * buffers that require multiple bios to complete.
  1055. */
  1056. if (!bp->b_error)
  1057. xfs_buf_ioerror(bp, -error);
  1058. if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
  1059. invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
  1060. _xfs_buf_ioend(bp, 1);
  1061. bio_put(bio);
  1062. }
  1063. static void
  1064. xfs_buf_ioapply_map(
  1065. struct xfs_buf *bp,
  1066. int map,
  1067. int *buf_offset,
  1068. int *count,
  1069. int rw)
  1070. {
  1071. int page_index;
  1072. int total_nr_pages = bp->b_page_count;
  1073. int nr_pages;
  1074. struct bio *bio;
  1075. sector_t sector = bp->b_maps[map].bm_bn;
  1076. int size;
  1077. int offset;
  1078. total_nr_pages = bp->b_page_count;
  1079. /* skip the pages in the buffer before the start offset */
  1080. page_index = 0;
  1081. offset = *buf_offset;
  1082. while (offset >= PAGE_SIZE) {
  1083. page_index++;
  1084. offset -= PAGE_SIZE;
  1085. }
  1086. /*
  1087. * Limit the IO size to the length of the current vector, and update the
  1088. * remaining IO count for the next time around.
  1089. */
  1090. size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
  1091. *count -= size;
  1092. *buf_offset += size;
  1093. next_chunk:
  1094. atomic_inc(&bp->b_io_remaining);
  1095. nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
  1096. if (nr_pages > total_nr_pages)
  1097. nr_pages = total_nr_pages;
  1098. bio = bio_alloc(GFP_NOIO, nr_pages);
  1099. bio->bi_bdev = bp->b_target->bt_bdev;
  1100. bio->bi_sector = sector;
  1101. bio->bi_end_io = xfs_buf_bio_end_io;
  1102. bio->bi_private = bp;
  1103. for (; size && nr_pages; nr_pages--, page_index++) {
  1104. int rbytes, nbytes = PAGE_SIZE - offset;
  1105. if (nbytes > size)
  1106. nbytes = size;
  1107. rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
  1108. offset);
  1109. if (rbytes < nbytes)
  1110. break;
  1111. offset = 0;
  1112. sector += BTOBB(nbytes);
  1113. size -= nbytes;
  1114. total_nr_pages--;
  1115. }
  1116. if (likely(bio->bi_size)) {
  1117. if (xfs_buf_is_vmapped(bp)) {
  1118. flush_kernel_vmap_range(bp->b_addr,
  1119. xfs_buf_vmap_len(bp));
  1120. }
  1121. submit_bio(rw, bio);
  1122. if (size)
  1123. goto next_chunk;
  1124. } else {
  1125. /*
  1126. * This is guaranteed not to be the last io reference count
  1127. * because the caller (xfs_buf_iorequest) holds a count itself.
  1128. */
  1129. atomic_dec(&bp->b_io_remaining);
  1130. xfs_buf_ioerror(bp, EIO);
  1131. bio_put(bio);
  1132. }
  1133. }
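/*
 * Translate the buffer state into block layer requests: derive the request
 * type and flags (WRITE_SYNC, REQ_FUA, REQ_FLUSH, READA, REQ_META) from
 * b_flags, then issue a chain of bios for each map under a single plug.
 */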
  1134. STATIC void
  1135. _xfs_buf_ioapply(
  1136. struct xfs_buf *bp)
  1137. {
  1138. struct blk_plug plug;
  1139. int rw;
  1140. int offset;
  1141. int size;
  1142. int i;
  1143. if (bp->b_flags & XBF_WRITE) {
  1144. if (bp->b_flags & XBF_SYNCIO)
  1145. rw = WRITE_SYNC;
  1146. else
  1147. rw = WRITE;
  1148. if (bp->b_flags & XBF_FUA)
  1149. rw |= REQ_FUA;
  1150. if (bp->b_flags & XBF_FLUSH)
  1151. rw |= REQ_FLUSH;
  1152. } else if (bp->b_flags & XBF_READ_AHEAD) {
  1153. rw = READA;
  1154. } else {
  1155. rw = READ;
  1156. }
  1157. /* we only use the buffer cache for meta-data */
  1158. rw |= REQ_META;
  1159. /*
  1160. * Walk all the vectors issuing IO on them. Set up the initial offset
  1161. * into the buffer and the desired IO size before we start -
  1162. * xfs_buf_ioapply_map() will modify them appropriately for each
  1163. * subsequent call.
  1164. */
  1165. offset = bp->b_offset;
  1166. size = BBTOB(bp->b_io_length);
  1167. blk_start_plug(&plug);
  1168. for (i = 0; i < bp->b_map_count; i++) {
  1169. xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
  1170. if (bp->b_error)
  1171. break;
  1172. if (size <= 0)
  1173. break; /* all done */
  1174. }
  1175. blk_finish_plug(&plug);
  1176. }
  1177. void
  1178. xfs_buf_iorequest(
  1179. xfs_buf_t *bp)
  1180. {
  1181. trace_xfs_buf_iorequest(bp, _RET_IP_);
  1182. ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
  1183. if (bp->b_flags & XBF_WRITE)
  1184. xfs_buf_wait_unpin(bp);
  1185. xfs_buf_hold(bp);
  1186. /* Set the count to 1 initially; this will stop an I/O
  1187. * completion callout which happens before we have started
  1188. * all the I/O from calling xfs_buf_ioend too early.
  1189. */
  1190. atomic_set(&bp->b_io_remaining, 1);
  1191. _xfs_buf_ioapply(bp);
  1192. _xfs_buf_ioend(bp, 1);
  1193. xfs_buf_rele(bp);
  1194. }
  1195. /*
  1196. * Waits for I/O to complete on the buffer supplied. It returns immediately if
  1197. * no I/O is pending or there is already a pending error on the buffer. It
  1198. * returns the I/O error code, if any, or 0 if there was no error.
  1199. */
  1200. int
  1201. xfs_buf_iowait(
  1202. xfs_buf_t *bp)
  1203. {
  1204. trace_xfs_buf_iowait(bp, _RET_IP_);
  1205. if (!bp->b_error)
  1206. wait_for_completion(&bp->b_iowait);
  1207. trace_xfs_buf_iowait_done(bp, _RET_IP_);
  1208. return bp->b_error;
  1209. }
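/*
 * Return the kernel address of the given byte offset into the buffer,
 * whether the buffer is heap-allocated, vmapped, or only reachable page by
 * page.
 */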
  1210. xfs_caddr_t
  1211. xfs_buf_offset(
  1212. xfs_buf_t *bp,
  1213. size_t offset)
  1214. {
  1215. struct page *page;
  1216. if (bp->b_addr)
  1217. return bp->b_addr + offset;
  1218. offset += bp->b_offset;
  1219. page = bp->b_pages[offset >> PAGE_SHIFT];
  1220. return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
  1221. }
  1222. /*
  1223. * Move data into or out of a buffer.
  1224. */
  1225. void
  1226. xfs_buf_iomove(
  1227. xfs_buf_t *bp, /* buffer to process */
  1228. size_t boff, /* starting buffer offset */
  1229. size_t bsize, /* length to copy */
  1230. void *data, /* data address */
  1231. xfs_buf_rw_t mode) /* read/write/zero flag */
  1232. {
  1233. size_t bend;
  1234. bend = boff + bsize;
  1235. while (boff < bend) {
  1236. struct page *page;
  1237. int page_index, page_offset, csize;
  1238. page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
  1239. page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
  1240. page = bp->b_pages[page_index];
  1241. csize = min_t(size_t, PAGE_SIZE - page_offset,
  1242. BBTOB(bp->b_io_length) - boff);
  1243. ASSERT((csize + page_offset) <= PAGE_SIZE);
  1244. switch (mode) {
  1245. case XBRW_ZERO:
  1246. memset(page_address(page) + page_offset, 0, csize);
  1247. break;
  1248. case XBRW_READ:
  1249. memcpy(data, page_address(page) + page_offset, csize);
  1250. break;
  1251. case XBRW_WRITE:
  1252. memcpy(page_address(page) + page_offset, data, csize);
  1253. }
  1254. boff += csize;
  1255. data += csize;
  1256. }
  1257. }
  1258. /*
  1259. * Handling of buffer targets (buftargs).
  1260. */
  1261. /*
  1262. * Wait for any bufs with callbacks that have been submitted but have not yet
  1263. * returned. These buffers will have an elevated hold count, so wait on those
  1264. * while freeing all the buffers only held by the LRU.
  1265. */
  1266. void
  1267. xfs_wait_buftarg(
  1268. struct xfs_buftarg *btp)
  1269. {
  1270. struct xfs_buf *bp;
  1271. restart:
  1272. spin_lock(&btp->bt_lru_lock);
  1273. while (!list_empty(&btp->bt_lru)) {
  1274. bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
  1275. if (atomic_read(&bp->b_hold) > 1) {
  1276. spin_unlock(&btp->bt_lru_lock);
  1277. delay(100);
  1278. goto restart;
  1279. }
  1280. /*
  1281. * clear the LRU reference count so the buffer doesn't get
  1282. * ignored in xfs_buf_rele().
  1283. */
  1284. atomic_set(&bp->b_lru_ref, 0);
  1285. spin_unlock(&btp->bt_lru_lock);
  1286. xfs_buf_rele(bp);
  1287. spin_lock(&btp->bt_lru_lock);
  1288. }
  1289. spin_unlock(&btp->bt_lru_lock);
  1290. }
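/*
 * Shrinker callback for the buftarg LRU. Buffers that still have a b_lru_ref
 * count get it decremented and are rotated to the tail for another trip;
 * buffers whose count has already reached zero are moved to a private
 * dispose list and released outside bt_lru_lock. Returns the number of
 * buffers remaining on the LRU.
 */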
  1291. int
  1292. xfs_buftarg_shrink(
  1293. struct shrinker *shrink,
  1294. struct shrink_control *sc)
  1295. {
  1296. struct xfs_buftarg *btp = container_of(shrink,
  1297. struct xfs_buftarg, bt_shrinker);
  1298. struct xfs_buf *bp;
  1299. int nr_to_scan = sc->nr_to_scan;
  1300. LIST_HEAD(dispose);
  1301. if (!nr_to_scan)
  1302. return btp->bt_lru_nr;
  1303. spin_lock(&btp->bt_lru_lock);
  1304. while (!list_empty(&btp->bt_lru)) {
  1305. if (nr_to_scan-- <= 0)
  1306. break;
  1307. bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
  1308. /*
  1309. * Decrement the b_lru_ref count unless the value is already
  1310. * zero. If the value is already zero, we need to reclaim the
  1311. * buffer, otherwise it gets another trip through the LRU.
  1312. */
  1313. if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
  1314. list_move_tail(&bp->b_lru, &btp->bt_lru);
  1315. continue;
  1316. }
  1317. /*
  1318. * remove the buffer from the LRU now to avoid needing another
  1319. * lock round trip inside xfs_buf_rele().
  1320. */
  1321. list_move(&bp->b_lru, &dispose);
  1322. btp->bt_lru_nr--;
  1323. bp->b_lru_flags |= _XBF_LRU_DISPOSE;
  1324. }
  1325. spin_unlock(&btp->bt_lru_lock);
  1326. while (!list_empty(&dispose)) {
  1327. bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
  1328. list_del_init(&bp->b_lru);
  1329. xfs_buf_rele(bp);
  1330. }
  1331. return btp->bt_lru_nr;
  1332. }
  1333. void
  1334. xfs_free_buftarg(
  1335. struct xfs_mount *mp,
  1336. struct xfs_buftarg *btp)
  1337. {
  1338. unregister_shrinker(&btp->bt_shrinker);
  1339. if (mp->m_flags & XFS_MOUNT_BARRIER)
  1340. xfs_blkdev_issue_flush(btp);
  1341. kmem_free(btp);
  1342. }
  1343. STATIC int
  1344. xfs_setsize_buftarg_flags(
  1345. xfs_buftarg_t *btp,
  1346. unsigned int blocksize,
  1347. unsigned int sectorsize,
  1348. int verbose)
  1349. {
  1350. btp->bt_bsize = blocksize;
  1351. btp->bt_sshift = ffs(sectorsize) - 1;
  1352. btp->bt_smask = sectorsize - 1;
  1353. if (set_blocksize(btp->bt_bdev, sectorsize)) {
  1354. char name[BDEVNAME_SIZE];
  1355. bdevname(btp->bt_bdev, name);
  1356. xfs_warn(btp->bt_mount,
  1357. "Cannot set_blocksize to %u on device %s\n",
  1358. sectorsize, name);
  1359. return EINVAL;
  1360. }
  1361. return 0;
  1362. }
  1363. /*
  1364. * When allocating the initial buffer target we have not yet
  1365. * read in the superblock, so we don't know what size sectors
  1366. * are being used at this early stage. Play safe.
  1367. */
  1368. STATIC int
  1369. xfs_setsize_buftarg_early(
  1370. xfs_buftarg_t *btp,
  1371. struct block_device *bdev)
  1372. {
  1373. return xfs_setsize_buftarg_flags(btp,
  1374. PAGE_SIZE, bdev_logical_block_size(bdev), 0);
  1375. }
  1376. int
  1377. xfs_setsize_buftarg(
  1378. xfs_buftarg_t *btp,
  1379. unsigned int blocksize,
  1380. unsigned int sectorsize)
  1381. {
  1382. return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
  1383. }
  1384. xfs_buftarg_t *
  1385. xfs_alloc_buftarg(
  1386. struct xfs_mount *mp,
  1387. struct block_device *bdev,
  1388. int external,
  1389. const char *fsname)
  1390. {
  1391. xfs_buftarg_t *btp;
  1392. btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
  1393. btp->bt_mount = mp;
  1394. btp->bt_dev = bdev->bd_dev;
  1395. btp->bt_bdev = bdev;
  1396. btp->bt_bdi = blk_get_backing_dev_info(bdev);
  1397. if (!btp->bt_bdi)
  1398. goto error;
  1399. INIT_LIST_HEAD(&btp->bt_lru);
  1400. spin_lock_init(&btp->bt_lru_lock);
  1401. if (xfs_setsize_buftarg_early(btp, bdev))
  1402. goto error;
  1403. btp->bt_shrinker.shrink = xfs_buftarg_shrink;
  1404. btp->bt_shrinker.seeks = DEFAULT_SEEKS;
  1405. register_shrinker(&btp->bt_shrinker);
  1406. return btp;
  1407. error:
  1408. kmem_free(btp);
  1409. return NULL;
  1410. }
  1411. /*
  1412. * Add a buffer to the delayed write list.
  1413. *
  1414. * This queues a buffer for writeout if it hasn't already been. Note that
  1415. * neither this routine nor the buffer list submission functions perform
  1416. * any internal synchronization. It is expected that the lists are thread-local
  1417. * to the callers.
  1418. *
  1419. * Returns true if we queued up the buffer, or false if it already had
  1420. * been on the buffer list.
  1421. */
  1422. bool
  1423. xfs_buf_delwri_queue(
  1424. struct xfs_buf *bp,
  1425. struct list_head *list)
  1426. {
  1427. ASSERT(xfs_buf_islocked(bp));
  1428. ASSERT(!(bp->b_flags & XBF_READ));
  1429. /*
  1430. * If the buffer is already marked delwri it already is queued up
  1431. * by someone else for immediate writeout. Just ignore it in that
  1432. * case.
  1433. */
  1434. if (bp->b_flags & _XBF_DELWRI_Q) {
  1435. trace_xfs_buf_delwri_queued(bp, _RET_IP_);
  1436. return false;
  1437. }
  1438. trace_xfs_buf_delwri_queue(bp, _RET_IP_);
  1439. /*
  1440. * If a buffer gets written out synchronously or marked stale while it
  1441. * is on a delwri list we lazily remove it. To do this, the other party
  1442. * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
  1443. * It remains referenced and on the list. In a rare corner case it
  1444. * might get readded to a delwri list after the synchronous writeout, in
  1445. * which case we just need to re-add the flag here.
  1446. */
  1447. bp->b_flags |= _XBF_DELWRI_Q;
  1448. if (list_empty(&bp->b_list)) {
  1449. atomic_inc(&bp->b_hold);
  1450. list_add_tail(&bp->b_list, list);
  1451. }
  1452. return true;
  1453. }
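/*
 * A minimal sketch of the intended delwri calling pattern, assuming a
 * caller-local list (the names below are illustrative only):
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_lock(bp);
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */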
  1454. /*
  1455. * Compare function is more complex than it needs to be because
  1456. * the return value is only 32 bits and we are doing comparisons
  1457. * on 64 bit values
  1458. */
  1459. static int
  1460. xfs_buf_cmp(
  1461. void *priv,
  1462. struct list_head *a,
  1463. struct list_head *b)
  1464. {
  1465. struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
  1466. struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
  1467. xfs_daddr_t diff;
  1468. diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
  1469. if (diff < 0)
  1470. return -1;
  1471. if (diff > 0)
  1472. return 1;
  1473. return 0;
  1474. }
  1475. static int
  1476. __xfs_buf_delwri_submit(
  1477. struct list_head *buffer_list,
  1478. struct list_head *io_list,
  1479. bool wait)
  1480. {
  1481. struct blk_plug plug;
  1482. struct xfs_buf *bp, *n;
  1483. int pinned = 0;
  1484. list_for_each_entry_safe(bp, n, buffer_list, b_list) {
  1485. if (!wait) {
  1486. if (xfs_buf_ispinned(bp)) {
  1487. pinned++;
  1488. continue;
  1489. }
  1490. if (!xfs_buf_trylock(bp))
  1491. continue;
  1492. } else {
  1493. xfs_buf_lock(bp);
  1494. }
  1495. /*
  1496. * Someone else might have written the buffer synchronously or
  1497. * marked it stale in the meantime. In that case only the
  1498. * _XBF_DELWRI_Q flag got cleared, and we have to drop the
  1499. * reference and remove it from the list here.
  1500. */
  1501. if (!(bp->b_flags & _XBF_DELWRI_Q)) {
  1502. list_del_init(&bp->b_list);
  1503. xfs_buf_relse(bp);
  1504. continue;
  1505. }
  1506. list_move_tail(&bp->b_list, io_list);
  1507. trace_xfs_buf_delwri_split(bp, _RET_IP_);
  1508. }
  1509. list_sort(NULL, io_list, xfs_buf_cmp);
  1510. blk_start_plug(&plug);
  1511. list_for_each_entry_safe(bp, n, io_list, b_list) {
  1512. bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
  1513. bp->b_flags |= XBF_WRITE;
  1514. if (!wait) {
  1515. bp->b_flags |= XBF_ASYNC;
  1516. list_del_init(&bp->b_list);
  1517. }
  1518. xfs_bdstrat_cb(bp);
  1519. }
  1520. blk_finish_plug(&plug);
  1521. return pinned;
  1522. }
  1523. /*
  1524. * Write out a buffer list asynchronously.
  1525. *
  1526. * This will take the @buffer_list, write all non-locked and non-pinned buffers
  1527. * out and not wait for I/O completion on any of the buffers. This interface
  1528. * is only safely useable for callers that can track I/O completion by higher
  1529. * level means, e.g. AIL pushing as the @buffer_list is consumed in this
  1530. * function.
  1531. */
  1532. int
  1533. xfs_buf_delwri_submit_nowait(
  1534. struct list_head *buffer_list)
  1535. {
  1536. LIST_HEAD (io_list);
  1537. return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
  1538. }
  1539. /*
  1540. * Write out a buffer list synchronously.
  1541. *
  1542. * This will take the @buffer_list, write all buffers out and wait for I/O
  1543. * completion on all of the buffers. @buffer_list is consumed by the function,
  1544. * so callers must have some other way of tracking buffers if they require such
  1545. * functionality.
  1546. */
  1547. int
  1548. xfs_buf_delwri_submit(
  1549. struct list_head *buffer_list)
  1550. {
  1551. LIST_HEAD (io_list);
  1552. int error = 0, error2;
  1553. struct xfs_buf *bp;
  1554. __xfs_buf_delwri_submit(buffer_list, &io_list, true);
  1555. /* Wait for IO to complete. */
  1556. while (!list_empty(&io_list)) {
  1557. bp = list_first_entry(&io_list, struct xfs_buf, b_list);
  1558. list_del_init(&bp->b_list);
  1559. error2 = xfs_buf_iowait(bp);
  1560. xfs_buf_relse(bp);
  1561. if (!error)
  1562. error = error2;
  1563. }
  1564. return error;
  1565. }
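/*
 * Set up the global state used by the buffer cache: the xfs_buf slab zone
 * and the xfslogd workqueue that runs deferred I/O completion work.
 */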
  1566. int __init
  1567. xfs_buf_init(void)
  1568. {
  1569. xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
  1570. KM_ZONE_HWALIGN, NULL);
  1571. if (!xfs_buf_zone)
  1572. goto out;
  1573. xfslogd_workqueue = alloc_workqueue("xfslogd",
  1574. WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
  1575. if (!xfslogd_workqueue)
  1576. goto out_free_buf_zone;
  1577. return 0;
  1578. out_free_buf_zone:
  1579. kmem_zone_destroy(xfs_buf_zone);
  1580. out:
  1581. return -ENOMEM;
  1582. }
  1583. void
  1584. xfs_buf_terminate(void)
  1585. {
  1586. destroy_workqueue(xfslogd_workqueue);
  1587. kmem_zone_destroy(xfs_buf_zone);
  1588. }