/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

struct dentry *bl_device_pipe;
wait_queue_head_t bl_wq;

static void print_page(struct page *page)
{
        dprintk("PRINTPAGE page %p\n", page);
        dprintk(" PagePrivate %d\n", PagePrivate(page));
        dprintk(" PageUptodate %d\n", PageUptodate(page));
        dprintk(" PageError %d\n", PageError(page));
        dprintk(" PageDirty %d\n", PageDirty(page));
        dprintk(" PageReferenced %d\n", PageReferenced(page));
        dprintk(" PageLocked %d\n", PageLocked(page));
        dprintk(" PageWriteback %d\n", PageWriteback(page));
        dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
        dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
        if (be->be_state == PNFS_BLOCK_NONE_DATA)
                return 1;
        else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
                return 0;
        else
                return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
        return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
                be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
        struct kref refcnt;
        struct rpc_call_ops call_ops;
        void (*pnfs_callback) (void *data);
        void *data;
};

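/*
 * Reference counting: alloc_parallel() starts the refcount at 1 (the
 * submitting thread's reference), bl_submit_bio() takes one reference per
 * bio it submits, and each bio's end_io drops that reference.  When the
 * last reference is dropped, destroy_parallel() fires pnfs_callback, which
 * completes the whole read or write request.
 */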
static inline struct parallel_io *alloc_parallel(void *data)
{
        struct parallel_io *rv;

        rv = kmalloc(sizeof(*rv), GFP_NOFS);
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
        }
        return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
        kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

        dprintk("%s enter\n", __func__);
        p->pnfs_callback(p->data);
        kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
        kref_put(&p->refcnt, destroy_parallel);
}

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
                        rw == READ ? "read" : "write",
                        bio->bi_size, (unsigned long long)bio->bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
}

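/*
 * Allocate a bio for up to npg pages and aim it at the extent's device.
 * The target sector is the file-relative sector translated into the
 * volume: isect - be_f_offset is the offset within the extent, and adding
 * be_v_offset gives the sector on be_mdev.  Under memory pressure from a
 * PF_MEMALLOC task, retry with progressively smaller bios rather than
 * failing outright.
 */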
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
                                     struct pnfs_block_extent *be,
                                     void (*end_io)(struct bio *, int err),
                                     struct parallel_io *par)
{
        struct bio *bio;

        npg = min(npg, BIO_MAX_PAGES);
        bio = bio_alloc(GFP_NOIO, npg);
        if (!bio && (current->flags & PF_MEMALLOC)) {
                while (!bio && (npg /= 2))
                        bio = bio_alloc(GFP_NOIO, npg);
        }

        if (bio) {
                bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
        return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
                                      struct parallel_io *par)
{
retry:
        if (!bio) {
                bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
        }
        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio = bl_submit_bio(rw, bio);
                goto retry;
        }
        return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                if (uptodate)
                        SetPageUptodate(page);
        } while (bvec >= bio->bi_io_vec);
        if (!uptodate) {
                if (!rdata->pnfs_error)
                        rdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(rdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

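/*
 * bl_end_par_io_read() is invoked from bio completion (typically interrupt)
 * context, so completion is deferred to a workqueue: the rpc_task's tk_work
 * entry is reused to run bl_read_cleanup(), which hands the request back to
 * the pNFS core via pnfs_ld_read_done().
 */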
static void bl_read_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_read_data *rdata;

        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        rdata = container_of(task, struct nfs_read_data, task);
        pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data)
{
        struct nfs_read_data *rdata = data;

        rdata->task.tk_status = rdata->pnfs_error;
        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
        schedule_work(&rdata->task.u.tk_work);
}

/* We don't want the normal .rpc_call_done callback used, so we replace it
 * with this stub.
 */
static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
{
        return;
}

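/*
 * Read path: walk the request's pages, looking up the block extent that
 * covers each page-sized sector range.  Pages that fall in a hole (and have
 * no copy-on-write source) are simply zero-filled; everything else is added
 * to a bio aimed at the extent that backs it (the COW source when the
 * primary extent is a hole).  All bios share one parallel_io, whose final
 * put triggers bl_end_par_io_read().
 */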
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
        int i, hole;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, extent_length = 0;
        struct parallel_io *par;
        loff_t f_offset = rdata->args.offset;
        size_t count = rdata->args.count;
        struct page **pages = rdata->args.pages;
        int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

        dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
                rdata->npages, f_offset, count);

        par = alloc_parallel(rdata);
        if (!par)
                goto use_mds;
        par->call_ops = *rdata->mds_ops;
        par->call_ops.rpc_call_done = bl_rpc_do_nothing;
        par->pnfs_callback = bl_end_par_io_read;
        /* At this point, we can no longer jump to use_mds */

        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
        for (i = pg_index; i < rdata->npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bl_put_extent(cow_read);
                        bio = bl_submit_bio(READ, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
                                                isect, &cow_read);
                        if (!be) {
                                rdata->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be->be_length -
                                (isect - be->be_f_offset);
                        if (cow_read) {
                                sector_t cow_length = cow_read->be_length -
                                        (isect - cow_read->be_f_offset);
                                extent_length = min(extent_length, cow_length);
                        }
                }
                hole = is_hole(be, isect);
                if (hole && !cow_read) {
                        bio = bl_submit_bio(READ, bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
                        print_page(pages[i]);
                        SetPageUptodate(pages[i]);
                } else {
                        struct pnfs_block_extent *be_read;

                        be_read = (hole && cow_read) ? cow_read : be;
                        bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
                                                 isect, pages[i], be_read,
                                                 bl_end_io_read, par);
                        if (IS_ERR(bio)) {
                                rdata->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                }
                isect += PAGE_CACHE_SECTORS;
                extent_length -= PAGE_CACHE_SECTORS;
        }
        if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
                rdata->res.eof = 1;
                rdata->res.count = rdata->inode->i_size - f_offset;
        } else {
                rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
        }
out:
        bl_put_extent(be);
        bl_put_extent(cow_read);
        bl_submit_bio(READ, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;

use_mds:
        dprintk("Giving up and using normal NFS\n");
        return PNFS_NOT_ATTEMPTED;
}

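/*
 * Convert the written byte range into a page-aligned sector range and, for
 * each INVALID extent it touches, record the sectors on the layout's commit
 * list (bl_mark_for_commit) so that a later LAYOUTCOMMIT can report them to
 * the server.
 */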
static void mark_extents_written(struct pnfs_block_layout *bl,
                                 __u64 offset, __u32 count)
{
        sector_t isect, end;
        struct pnfs_block_extent *be;

        dprintk("%s(%llu, %u)\n", __func__, offset, count);
        if (count == 0)
                return;
        isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
        end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
        end >>= SECTOR_SHIFT;
        while (isect < end) {
                sector_t len;

                be = bl_find_get_extent(bl, isect, NULL);
                BUG_ON(!be); /* FIXME */
                len = min(end, be->be_f_offset + be->be_length) - isect;
                if (be->be_state == PNFS_BLOCK_INVALID_DATA)
                        bl_mark_for_commit(be, isect, len); /* What if fails? */
                isect += len;
                bl_put_extent(be);
        }
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                /* This is the zeroing page we added */
                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);
        if (!uptodate) {
                if (!wdata->pnfs_error)
                        wdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(wdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_write(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

        if (!uptodate) {
                if (!wdata->pnfs_error)
                        wdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(wdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write;
 * it marks sectors as written and extends the commit list.
 */
static void bl_write_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_write_data *wdata;

        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        wdata = container_of(task, struct nfs_write_data, task);
        if (!wdata->pnfs_error) {
                /* Marks for LAYOUTCOMMIT */
                mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
                                     wdata->args.offset, wdata->args.count);
        }
        pnfs_ld_write_done(wdata);
}

/* Called when the last of the bios associated with a bl_write_pagelist call
 * finishes.
 */
static void bl_end_par_io_write(void *data)
{
        struct nfs_write_data *wdata = data;

        wdata->task.tk_status = wdata->pnfs_error;
        wdata->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
        schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark the intersection of layout and page as bad, so it is
 * not used again.
 */
static void mark_bad_read(void)
{
        return;
}

/*
 * map_block: map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
        dprintk("%s enter be=%p\n", __func__, be);

        set_buffer_mapped(bh);
        bh->b_bdev = be->be_mdev;
        bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
            (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

        dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
                __func__, (unsigned long long)isect, (long)bh->b_blocknr,
                bh->b_size);
        return;
}

/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
        struct buffer_head *bh = NULL;
        int ret = 0;
        sector_t isect;

        dprintk("%s enter, %p\n", __func__, page);
        BUG_ON(PageUptodate(page));
        if (!cow_read) {
                zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                goto cleanup;
        }

        bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
        if (!bh) {
                ret = -ENOMEM;
                goto cleanup;
        }

        isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
        map_block(bh, isect, cow_read);
        if (!bh_uptodate_or_lock(bh))
                ret = bh_submit_read(bh);
        if (ret)
                goto cleanup;
        SetPageUptodate(page);

cleanup:
        bl_put_extent(cow_read);
        if (bh)
                free_buffer_head(bh);
        if (ret) {
                /* Need to mark layout with bad read...should now
                 * just use nfs4 for reads and writes.
                 */
                mark_bad_read();
        }
        return ret;
}

/* Find or create a zeroing page marked as being under writeback.
 * Return ERR_PTR on error, NULL to indicate the page should be skipped, or
 * the page itself to indicate it should be written out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
                         struct pnfs_block_extent *cow_read)
{
        struct page *page;
        int locked = 0;

        page = find_get_page(inode->i_mapping, index);
        if (page)
                goto check_page;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (unlikely(!page)) {
                dprintk("%s oom\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
        locked = 1;

check_page:
        /* PageDirty: Other will write this out
         * PageWriteback: Other is writing this out
         * PageUptodate: It was read before
         */
        if (PageDirty(page) || PageWriteback(page)) {
                print_page(page);
                if (locked)
                        unlock_page(page);
                page_cache_release(page);
                return NULL;
        }

        if (!locked) {
                lock_page(page);
                locked = 1;
                goto check_page;
        }
        if (!PageUptodate(page)) {
                /* New page, read it in or zero it */
                init_page_for_write(page, cow_read);
        }
        set_page_writeback(page);
        unlock_page(page);
        return page;
}

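/*
 * Write path: for writes that land in INVALID (uninitialized) extents, the
 * pages of the enclosing pNFS block that are not covered by the write are
 * looked up or created and zeroed first (fill_invalid_ext), so the block
 * ends up fully initialized on disk.  The request's own pages are then
 * added to write bios, with each INVALID range marked initialized and
 * queued for LAYOUTCOMMIT.  All bios share one parallel_io whose final put
 * triggers bl_end_par_io_write().
 */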
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
        int i, ret, npg_zero, pg_index, last = 0;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, last_isect = 0, extent_length = 0;
        struct parallel_io *par;
        loff_t offset = wdata->args.offset;
        size_t count = wdata->args.count;
        struct page **pages = wdata->args.pages;
        struct page *page;
        pgoff_t index;
        u64 temp;
        int npg_per_block =
            NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

        dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
        /* At this point, wdata->pages is a (sequential) list of nfs_pages.
         * We want to write each, and if there is an error, set pnfs_error
         * so the request is redone using NFS.
         */
        par = alloc_parallel(wdata);
        if (!par)
                return PNFS_NOT_ATTEMPTED;
        par->call_ops = *wdata->mds_ops;
        par->call_ops.rpc_call_done = bl_rpc_do_nothing;
        par->pnfs_callback = bl_end_par_io_write;
        /* At this point, have to be more careful with error handling */

        isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
        if (!be || !is_writable(be, isect)) {
                dprintk("%s no matching extents!\n", __func__);
                wdata->pnfs_error = -EINVAL;
                goto out;
        }

        /* First page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
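                /*
                 * npg_zero is the number of pages between the start of the
                 * pNFS block containing the write offset and the first page
                 * being written; isect is then backed up to the start of
                 * that block-aligned region so those pages can be zeroed
                 * before the real data goes out.
                 */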
                temp = offset >> PAGE_CACHE_SHIFT;
                npg_zero = do_div(temp, npg_per_block);
                isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
                                     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
                extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
                dprintk("%s need to zero %d pages\n", __func__, npg_zero);
                for (; npg_zero > 0; npg_zero--) {
                        if (bl_is_sector_init(be->be_inval, isect)) {
                                dprintk("isect %llu already init\n",
                                        (unsigned long long)isect);
                                goto next_page;
                        }
                        /* page ref released in bl_end_io_write_zero */
                        index = isect >> PAGE_CACHE_SECTOR_SHIFT;
                        dprintk("%s zero %dth page: index %lu isect %llu\n",
                                __func__, npg_zero, index,
                                (unsigned long long)isect);
                        page = bl_find_get_zeroing_page(wdata->inode, index,
                                                        cow_read);
                        if (unlikely(IS_ERR(page))) {
                                wdata->pnfs_error = PTR_ERR(page);
                                goto out;
                        } else if (page == NULL)
                                goto next_page;

                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                   PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                end_page_writeback(page);
                                page_cache_release(page);
                                wdata->pnfs_error = ret;
                                goto out;
                        }

                        bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
                                                 isect, page, be,
                                                 bl_end_io_write_zero, par);
                        if (IS_ERR(bio)) {
                                wdata->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                        /* FIXME: This should be done in bi_end_io */
                        mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
                                             page->index << PAGE_CACHE_SHIFT,
                                             PAGE_CACHE_SIZE);
next_page:
                        isect += PAGE_CACHE_SECTORS;
                        extent_length -= PAGE_CACHE_SECTORS;
                }
                if (last)
                        goto write_done;
        }
        bio = bl_submit_bio(WRITE, bio);

        /* Middle pages */
        pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
        for (i = pg_index; i < wdata->npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
                                                isect, NULL);
                        if (!be || !is_writable(be, isect)) {
                                wdata->pnfs_error = -EINVAL;
                                goto out;
                        }
                        extent_length = be->be_length -
                                (isect - be->be_f_offset);
                }
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                   PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                wdata->pnfs_error = ret;
                                goto out;
                        }
                }
                bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
                                         isect, pages[i], be,
                                         bl_end_io_write, par);
                if (IS_ERR(bio)) {
                        wdata->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
                isect += PAGE_CACHE_SECTORS;
                last_isect = isect;
                extent_length -= PAGE_CACHE_SECTORS;
        }

        /* Last page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                bio = bl_submit_bio(WRITE, bio);
                temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
                npg_zero = npg_per_block - do_div(temp, npg_per_block);
                if (npg_zero < npg_per_block) {
                        last = 1;
                        goto fill_invalid_ext;
                }
        }

write_done:
        wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
        if (count < wdata->res.count) {
                wdata->res.count = count;
        }
out:
        bl_put_extent(be);
        bl_submit_bio(WRITE, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
        int i;
        struct pnfs_block_extent *be;

        spin_lock(&bl->bl_ext_lock);
        for (i = 0; i < EXTENT_LISTS; i++) {
                while (!list_empty(&bl->bl_extents[i])) {
                        be = list_first_entry(&bl->bl_extents[i],
                                              struct pnfs_block_extent,
                                              be_node);
                        list_del(&be->be_node);
                        bl_put_extent(be);
                }
        }
        spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
        struct pnfs_inval_tracking *pos, *temp;

        list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
                list_del(&pos->it_link);
                kfree(pos);
        }
        return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

        dprintk("%s enter\n", __func__);
        release_extents(bl, NULL);
        release_inval_marks(&bl->bl_inval);
        kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
                                                   gfp_t gfp_flags)
{
        struct pnfs_block_layout *bl;

        dprintk("%s enter\n", __func__);
        bl = kzalloc(sizeof(*bl), gfp_flags);
        if (!bl)
                return NULL;
        spin_lock_init(&bl->bl_ext_lock);
        INIT_LIST_HEAD(&bl->bl_extents[0]);
        INIT_LIST_HEAD(&bl->bl_extents[1]);
        INIT_LIST_HEAD(&bl->bl_commit);
        INIT_LIST_HEAD(&bl->bl_committing);
        bl->bl_count = 0;
        bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
        BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
        return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
        dprintk("%s enter\n", __func__);
        kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
                                                 struct nfs4_layoutget_res *lgr,
                                                 gfp_t gfp_flags)
{
        struct pnfs_layout_segment *lseg;
        int status;

        dprintk("%s enter\n", __func__);
        lseg = kzalloc(sizeof(*lseg), gfp_flags);
        if (!lseg)
                return ERR_PTR(-ENOMEM);
        status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
        if (status) {
                /* We don't want to call the full-blown bl_free_lseg,
                 * since on error extents were not touched.
                 */
                kfree(lseg);
                return ERR_PTR(status);
        }
        return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
                       const struct nfs4_layoutcommit_args *arg)
{
        dprintk("%s enter\n", __func__);
        encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
        struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

        dprintk("%s enter\n", __func__);
        clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
        if (mid) {
                struct pnfs_block_dev *dev, *tmp;

                /* No need to take bm_lock as we are the last user freeing bm_devlist */
                list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
                        list_del(&dev->bm_node);
                        bl_free_block_dev(dev);
                }
                kfree(mid);
        }
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
                        struct nfs4_deviceid *d_id)
{
        struct pnfs_device *dev;
        struct pnfs_block_dev *rv;
        u32 max_resp_sz;
        int max_pages;
        struct page **pages = NULL;
        int i, rc;

        /*
         * Use the session max response size as the basis for setting
         * GETDEVICEINFO's maxcount
         */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = max_resp_sz >> PAGE_SHIFT;
        dprintk("%s max_resp_sz %u max_pages %d\n",
                __func__, max_resp_sz, max_pages);

        dev = kmalloc(sizeof(*dev), GFP_NOFS);
        if (!dev) {
                dprintk("%s kmalloc failed\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
        if (pages == NULL) {
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < max_pages; i++) {
                pages[i] = alloc_page(GFP_NOFS);
                if (!pages[i]) {
                        rv = ERR_PTR(-ENOMEM);
                        goto out_free;
                }
        }

        memcpy(&dev->dev_id, d_id, sizeof(*d_id));
        dev->layout_type = LAYOUT_BLOCK_VOLUME;
        dev->pages = pages;
        dev->pgbase = 0;
        dev->pglen = PAGE_SIZE * max_pages;
        dev->mincount = 0;

        dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
        rc = nfs4_proc_getdeviceinfo(server, dev);
        dprintk("%s getdevice info returns %d\n", __func__, rc);
        if (rc) {
                rv = ERR_PTR(rc);
                goto out_free;
        }

        rv = nfs4_blk_decode_device(server, dev);
out_free:
        /* pages[i] may be NULL if the allocation loop above failed partway */
        for (i = 0; i < max_pages; i++)
                if (pages[i])
                        __free_page(pages[i]);
        kfree(pages);
        kfree(dev);
        return rv;
}

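/*
 * Called at mount time: fetch the server's device list with GETDEVICELIST,
 * resolve each device ID via GETDEVICEINFO, and hang the resulting
 * pnfs_block_dev list off the nfs_server as the per-mount state used by the
 * layout driver.
 */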
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
        struct block_mount_id *b_mt_id = NULL;
        struct pnfs_devicelist *dlist = NULL;
        struct pnfs_block_dev *bdev;
        LIST_HEAD(block_disklist);
        int status, i;

        dprintk("%s enter\n", __func__);

        if (server->pnfs_blksize == 0) {
                dprintk("%s Server did not return blksize\n", __func__);
                return -EINVAL;
        }
        b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
        if (!b_mt_id) {
                status = -ENOMEM;
                goto out_error;
        }
        /* Initialize nfs4 block layout mount id */
        spin_lock_init(&b_mt_id->bm_lock);
        INIT_LIST_HEAD(&b_mt_id->bm_devlist);

        dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
        if (!dlist) {
                status = -ENOMEM;
                goto out_error;
        }
        dlist->eof = 0;
        while (!dlist->eof) {
                status = nfs4_proc_getdevicelist(server, fh, dlist);
                if (status)
                        goto out_error;
                dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
                        __func__, dlist->num_devs, dlist->eof);
                for (i = 0; i < dlist->num_devs; i++) {
                        bdev = nfs4_blk_get_deviceinfo(server, fh,
                                                       &dlist->dev_id[i]);
                        if (IS_ERR(bdev)) {
                                status = PTR_ERR(bdev);
                                goto out_error;
                        }
                        spin_lock(&b_mt_id->bm_lock);
                        list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
                        spin_unlock(&b_mt_id->bm_lock);
                }
        }
        dprintk("%s SUCCESS\n", __func__);
        server->pnfs_ld_data = b_mt_id;

out_return:
        kfree(dlist);
        return status;

out_error:
        free_blk_mountid(b_mt_id);
        goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
        struct block_mount_id *b_mt_id = server->pnfs_ld_data;

        dprintk("%s enter\n", __func__);
        free_blk_mountid(b_mt_id);
        dprintk("%s RETURNS\n", __func__);
        return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
        .pg_init = pnfs_generic_pg_init_read,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
        .pg_init = pnfs_generic_pg_init_write,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
        .id = LAYOUT_BLOCK_VOLUME,
        .name = "LAYOUT_BLOCK_VOLUME",
        .read_pagelist = bl_read_pagelist,
        .write_pagelist = bl_write_pagelist,
        .alloc_layout_hdr = bl_alloc_layout_hdr,
        .free_layout_hdr = bl_free_layout_hdr,
        .alloc_lseg = bl_alloc_lseg,
        .free_lseg = bl_free_lseg,
        .encode_layoutcommit = bl_encode_layoutcommit,
        .cleanup_layoutcommit = bl_cleanup_layoutcommit,
        .set_layoutdriver = bl_set_layoutdriver,
        .clear_layoutdriver = bl_clear_layoutdriver,
        .pg_read_ops = &bl_pg_read_ops,
        .pg_write_ops = &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
        .upcall = rpc_pipe_generic_upcall,
        .downcall = bl_pipe_downcall,
        .destroy_msg = bl_pipe_destroy_msg,
};

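/*
 * Module init: register the layout driver with the pNFS core and create the
 * "blocklayout" pipe under rpc_pipefs.  The pipe carries device-resolution
 * upcalls to a userspace helper; replies come back through
 * bl_pipe_downcall().
 */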
static int __init nfs4blocklayout_init(void)
{
        struct vfsmount *mnt;
        struct path path;
        int ret;

        dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

        ret = pnfs_register_layoutdriver(&blocklayout_type);
        if (ret)
                goto out;

        init_waitqueue_head(&bl_wq);

        mnt = rpc_get_mount();
        if (IS_ERR(mnt)) {
                ret = PTR_ERR(mnt);
                goto out_remove;
        }

        ret = vfs_path_lookup(mnt->mnt_root,
                              mnt,
                              NFS_PIPE_DIRNAME, 0, &path);
        if (ret)
                goto out_putrpc;

        bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
                                    &bl_upcall_ops, 0);
        path_put(&path);
        if (IS_ERR(bl_device_pipe)) {
                ret = PTR_ERR(bl_device_pipe);
                goto out_putrpc;
        }
out:
        return ret;

out_putrpc:
        rpc_put_mount();
out_remove:
        pnfs_unregister_layoutdriver(&blocklayout_type);
        return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
        dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
                __func__);

        pnfs_unregister_layoutdriver(&blocklayout_type);
        rpc_unlink(bl_device_pipe);
        rpc_put_mount();
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);