xfs_buf.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/list_sort.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));
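
/*
 * Illustrative summary of the allocation contexts the two conversion
 * macros above encode:
 *
 *	XBF_READ_AHEAD	-> __GFP_NORETRY: readahead is speculative, so the
 *			   allocation is allowed to fail fast under pressure.
 *	XBF_DONT_BLOCK	-> GFP_NOFS/KM_NOFS: do not recurse into filesystem
 *			   reclaim while filesystem locks may be held.
 *	otherwise	-> GFP_KERNEL/KM_SLEEP: a normal blocking allocation.
 *
 * __GFP_NOWARN is always OR-ed in because the callers handle allocation
 * failure themselves.
 */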
static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are no
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
 * there to optimise the shrinker removing the buffer from the LRU and
 * calling xfs_buf_free(), i.e. it avoids an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}
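
/*
 * A note on the pattern above (illustrative): the unlocked list_empty()
 * check in xfs_buf_lru_del() is only an optimistic fast path. The state is
 * re-checked under bt_lru_lock before list_del_init(), so a racing add or
 * delete cannot corrupt the list; the worst case is one needless lock
 * round trip.
 */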
/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}
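
/*
 * Illustrative note: for buffers of up to XB_PAGES pages the pointer array
 * lives inline in the xfs_buf itself (b_page_array), so small buffers need
 * no extra allocation. Only larger buffers allocate a separate array, and
 * that is the case _xfs_buf_free_pages() below has to undo.
 */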
/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_deallocate(bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page
 * list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	xfs_off_t		end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_SHIFT;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}
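
/*
 * Illustrative summary of the strategy above: a sub-page buffer is first
 * tried from the heap, and falls back to alloc_page() only when the heap
 * object would straddle a page boundary, because the I/O path maps each
 * buffer page individually. Multi-page buffers always take the
 * alloc_page() loop, which retries indefinitely behind a congestion wait;
 * only readahead (XBF_READ_AHEAD) is allowed to fail with ENOMEM.
 */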
/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}
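
/*
 * Illustrative note: vm_map_ram() can fail transiently while vmap space is
 * still held by lazily-freed mappings; vm_unmap_aliases() flushes those
 * lazy unmaps, which is why the loop above retries the mapping after
 * calling it rather than failing on the first attempt.
 */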
/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of the block device target. The buffer is returned
 * locked. If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (xfs_buf_cond_lock(bp)) {
		/* failed, so wait for the lock if requested. */
		if (!(flags & XBF_TRYLOCK)) {
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}
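
/*
 * Illustrative note on the cache structure: buffers are indexed per
 * allocation group in an rbtree (pag->pag_buf_tree) keyed on
 * b_file_offset and protected by pag->pag_buf_lock. Two entries can share
 * an offset only transiently, while a stale buffer from an uncommitted
 * transaction still occupies it, which is why the walk above keeps
 * searching right on a length mismatch instead of treating the offset
 * match as definitive.
 */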
/*
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = xfs_buf_allocate_memory(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	status = xfs_buf_iorequest(bp);
	if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
		return status;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get(target, ioff, isize, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
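
/*
 * A minimal usage sketch (illustrative only; error handling elided):
 *
 *	bp = xfs_buf_read(target, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		... access the contents via xfs_buf_offset(bp, 0) ...
 *		xfs_buf_relse(bp);	(unlock and drop the reference)
 *	}
 *
 * The buffer comes back locked; callers check b_error before trusting
 * the contents.
 */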
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}
/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			length,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	xfs_buf_lock(bp);
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);
	XFS_BUF_BUSY(bp);

	xfsbdstrat(mp, bp);
	error = xfs_buf_iowait(bp);
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (!is_vmalloc_addr(addr))
		return virt_to_page(addr);
	else
		return vmalloc_to_page(addr);
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}
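
/*
 * Illustrative note: xfs_buf_associate_memory() points an existing buffer
 * at caller-supplied memory rather than allocating pages, rebuilding
 * b_pages from the pages backing that memory (kmalloc or vmalloc, hence
 * mem_to_page() above). The log uses this to wrap portions of its iclog
 * buffers, for example; nothing is allocated or freed here beyond the
 * pointer array itself.
 */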
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			len,
	int			flags)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	xfs_buf_unlock(bp);

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
fail_free_buf:
	xfs_buf_deallocate(bp);
fail:
	return NULL;
}
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer. If the hold count is 1,
 * calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
			   atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}
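
/*
 * Illustrative summary of the release path above: dropping the last hold
 * on a cached buffer does not normally free it. If b_lru_ref is still
 * positive the buffer parks on the LRU (which itself holds a reference),
 * and only the shrinker, or the release of a stale buffer, removes it
 * from the rbtree and frees it for real.
 */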
/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_cond_lock(bp, _RET_IP_);
	return locked ? 0 : -EBUSY;
}
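
/*
 * Usage sketch (illustrative): xfs_buf_cond_lock() returns 0 when the lock
 * was taken and -EBUSY otherwise, so callers typically do:
 *
 *	if (xfs_buf_cond_lock(bp)) {
 *		if (!(flags & XBF_TRYLOCK))
 *			xfs_buf_lock(bp);	(block until available)
 *		else
 *			return NULL;		(caller asked not to block)
 *	}
 */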
int
xfs_buf_lock_value(
	xfs_buf_t		*bp)
{
	return bp->b_sema.count;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_backing_dev(bp->b_target->bt_bdi, NULL);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly. We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t		*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}
STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_backing_dev(bp->b_target->bt_bdi, NULL);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}
/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}
int
xfs_bwrite(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	int			error;

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

	xfs_buf_delwri_dequeue(bp);
	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	xfs_buf_relse(bp);
	return error;
}

void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	trace_xfs_buf_bdwrite(bp, _RET_IP_);

	bp->b_flags &= ~XBF_READ;
	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

	xfs_buf_delwri_queue(bp, 1);
}
/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	XFS_BUF_ERROR(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_STALE(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = XFS_BUF_BFLAGS(bp);
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_STALE(bp);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		XFS_BUF_ERROR(bp, EIO);
		XFS_BUF_FINISH_IOWAIT(bp);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}
/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem. Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}
STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining))
		xfs_buf_ioend(bp, schedule);
}
STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_FLUSH_FUA;
	} else if (bp->b_flags & XBF_LOG_BUFFER) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}
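
/*
 * Illustrative note on the chunking above: a buffer larger than a single
 * bio can describe is split across several bios, and every submitted bio
 * holds one count on b_io_remaining. Together with the extra count taken
 * in xfs_buf_iorequest() below, this stops completion processing from
 * running before all chunks have been submitted.
 */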
int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}
/*
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	if (atomic_read(&bp->b_io_remaining))
		blk_run_backing_dev(bp->b_target->bt_bdi, NULL);
	wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
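
/*
 * Usage sketch (illustrative): zeroing the first 512 bytes of a buffer
 * without caring whether its pages are virtually mapped:
 *
 *	xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO);
 *
 * The data pointer is only dereferenced for XBRW_READ and XBRW_WRITE.
 */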
/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}
int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	int			nr_to_scan,
	gfp_t			mask)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}
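
/*
 * Illustrative note: this is the memory shrinker callback registered in
 * xfs_alloc_buftarg() below. A call with nr_to_scan == 0 merely reports
 * the LRU population; otherwise each scanned buffer loses one b_lru_ref,
 * so a buffer survives as many shrinker passes as its b_lru_ref value
 * before it is moved to the dispose list and released.
 */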
void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kthread_stop(btp->bt_task);
	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what size sectors
 * are being used at this early stage. Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}
STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp,
	const char		*fsname)
{
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spin_lock_init(&btp->bt_delwrite_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp, fsname))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}
/*
 * Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
	int			unlock)
{
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	if (list_empty(dwq)) {
		/* start xfsbufd as it is about to have something to do */
		wake_up_process(bp->b_target->bt_task);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
	int			dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwrite_lock);
	list_move(&bp->b_list, &btp->bt_delwrite_queue);
	spin_unlock(&btp->bt_delwrite_lock);
}
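
/*
 * Illustrative note on the aging scheme: buffers record b_queuetime when
 * queued, and xfsbufd only writes back buffers older than
 * xfs_buf_age_centisecs. Promotion therefore just backdates b_queuetime
 * past that age and moves the buffer to the head of the queue; no I/O is
 * issued here, the next xfsbufd pass picks it up.
 */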
STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;
}
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

void
xfs_buf_delwri_sort(
	xfs_buftarg_t	*target,
	struct list_head *list)
{
	list_sort(NULL, list, xfs_buf_cmp);
}

STATIC int
xfsbufd(
	void		*data)
{
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
		int	count = 0;
		struct list_head tmp;

		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwrite_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		list_sort(NULL, &tmp, xfs_buf_cmp);
		while (!list_empty(&tmp)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
			list_del_init(&bp->b_list);
			xfs_bdstrat_cb(bp);
			count++;
		}
		if (count)
			blk_run_backing_dev(target->bt_bdi, NULL);

	} while (!kthread_should_stop());

	return 0;
}
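
/*
 * Illustrative note: xfsbufd runs with PF_MEMALLOC set because it is the
 * thread that cleans delayed-write metadata; letting it dip into the
 * emergency memory reserves keeps reclaim from deadlocking against the
 * very writeback it is waiting for. The list_sort() by disk address above
 * turns the flush into mostly-sequential I/O.
 */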
/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device. This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	xfs_buf_t	*bp;
	int		pincount = 0;
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);

	xfs_buf_runall_queues(xfsconvertd_workqueue);
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async and then if we need to wait for completion
	 * we do that after issuing all the IO.
	 */
	list_sort(NULL, &tmp_list, xfs_buf_cmp);
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
		ASSERT(target == bp->b_target);
		list_del_init(&bp->b_list);
		if (wait) {
			bp->b_flags &= ~XBF_ASYNC;
			list_add(&bp->b_list, &wait_list);
		}
		xfs_bdstrat_cb(bp);
	}

	if (wait) {
		/* Expedite and wait for IO to complete. */
		blk_run_backing_dev(target->bt_bdi, NULL);
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

			list_del_init(&bp->b_list);
			xfs_buf_iowait(bp);
			xfs_buf_relse(bp);
		}
	}

	return pincount;
}
int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsconvertd_workqueue = create_workqueue("xfsconvertd");
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfsconvertd_workqueue);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif