/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static void print_page(struct page *page)
{
        dprintk("PRINTPAGE page %p\n", page);
        dprintk("        PagePrivate %d\n", PagePrivate(page));
        dprintk("        PageUptodate %d\n", PageUptodate(page));
        dprintk("        PageError %d\n", PageError(page));
        dprintk("        PageDirty %d\n", PageDirty(page));
        dprintk("        PageReferenced %d\n", PageReferenced(page));
        dprintk("        PageLocked %d\n", PageLocked(page));
        dprintk("        PageWriteback %d\n", PageWriteback(page));
        dprintk("        PageMappedToDisk %d\n", PageMappedToDisk(page));
        dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
        if (be->be_state == PNFS_BLOCK_NONE_DATA)
                return 1;
        else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
                return 0;
        else
                return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
        return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
                be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
        struct kref refcnt;
        void (*pnfs_callback) (void *data, int num_se);
        void *data;
        int bse_count;
};
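
/* Reference counting scheme: alloc_parallel() starts the refcount at 1,
 * held on behalf of the caller; bl_submit_bio() takes an extra reference
 * for each bio it submits, which the bio's end_io handler drops via
 * put_parallel().  Once the caller's final put_parallel() and all bio
 * completions have run, destroy_parallel() fires pnfs_callback exactly
 * once to finish the RPC.
 */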
static inline struct parallel_io *alloc_parallel(void *data)
{
        struct parallel_io *rv;

        rv = kmalloc(sizeof(*rv), GFP_NOFS);
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
                rv->bse_count = 0;
        }
        return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
        kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

        dprintk("%s enter\n", __func__);
        p->pnfs_callback(p->data, p->bse_count);
        kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
        kref_put(&p->refcnt, destroy_parallel);
}

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
                        rw == READ ? "read" : "write",
                        bio->bi_size, (unsigned long long)bio->bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
}
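
/* Allocate a bio large enough for npg pages.  If allocation fails while
 * the task is already in memory-reclaim context (PF_MEMALLOC), keep
 * halving the requested size so forward progress is still possible with
 * a smaller bio.
 */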
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
                                     struct pnfs_block_extent *be,
                                     void (*end_io)(struct bio *, int err),
                                     struct parallel_io *par)
{
        struct bio *bio;

        npg = min(npg, BIO_MAX_PAGES);
        bio = bio_alloc(GFP_NOIO, npg);
        if (!bio && (current->flags & PF_MEMALLOC)) {
                while (!bio && (npg /= 2))
                        bio = bio_alloc(GFP_NOIO, npg);
        }

        if (bio) {
                bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
        return bio;
}
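
/* Add one page to the current bio, starting a new bio on the right
 * device/sector if none is in flight.  If the existing bio cannot take
 * the whole page (bio_add_page() returns a short count), submit it and
 * retry with a fresh bio.
 */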
static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
                                      struct parallel_io *par)
{
retry:
        if (!bio) {
                bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
        }
        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio = bl_submit_bio(rw, bio);
                goto retry;
        }
        return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                if (uptodate)
                        SetPageUptodate(page);
        } while (bvec >= bio->bi_io_vec);
        if (!uptodate) {
                if (!rdata->pnfs_error)
                        rdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(rdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_read_data *rdata;

        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        rdata = container_of(task, struct nfs_read_data, task);
        pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
        struct nfs_read_data *rdata = data;

        rdata->task.tk_status = rdata->pnfs_error;
        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
        schedule_work(&rdata->task.u.tk_work);
}
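
/* Read path: walk the request's pages, look up the extent covering each
 * sector, and either zero-fill holes directly (no device access) or add
 * the page to a bio aimed at the data extent (or at the copy-on-write
 * source extent, when one is returned).  Bios are flushed whenever the
 * current extent runs out.
 */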
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
        int i, hole;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, extent_length = 0;
        struct parallel_io *par;
        loff_t f_offset = rdata->args.offset;
        struct page **pages = rdata->args.pages;
        int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

        dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
                rdata->npages, f_offset, (unsigned int)rdata->args.count);

        par = alloc_parallel(rdata);
        if (!par)
                goto use_mds;
        par->pnfs_callback = bl_end_par_io_read;
        /* At this point, we can no longer jump to use_mds */

        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
        for (i = pg_index; i < rdata->npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bl_put_extent(cow_read);
                        bio = bl_submit_bio(READ, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
                                                isect, &cow_read);
                        if (!be) {
                                rdata->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be->be_length -
                                (isect - be->be_f_offset);
                        if (cow_read) {
                                sector_t cow_length = cow_read->be_length -
                                        (isect - cow_read->be_f_offset);
                                extent_length = min(extent_length, cow_length);
                        }
                }
                hole = is_hole(be, isect);
                if (hole && !cow_read) {
                        bio = bl_submit_bio(READ, bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
                        print_page(pages[i]);
                        SetPageUptodate(pages[i]);
                } else {
                        struct pnfs_block_extent *be_read;

                        be_read = (hole && cow_read) ? cow_read : be;
                        bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
                                                 isect, pages[i], be_read,
                                                 bl_end_io_read, par);
                        if (IS_ERR(bio)) {
                                rdata->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                }
                isect += PAGE_CACHE_SECTORS;
                extent_length -= PAGE_CACHE_SECTORS;
        }
        if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
                rdata->res.eof = 1;
                rdata->res.count = rdata->inode->i_size - f_offset;
        } else {
                rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
        }
out:
        bl_put_extent(be);
        bl_put_extent(cow_read);
        bl_submit_bio(READ, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;

use_mds:
        dprintk("Giving up and using normal NFS\n");
        return PNFS_NOT_ATTEMPTED;
}
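
/* For each INVALID extent touched by [offset, offset + count), consume one
 * of the short extents reserved earlier and queue the written range on the
 * layout's commit list, so a later LAYOUTCOMMIT reports it to the server.
 */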
static void mark_extents_written(struct pnfs_block_layout *bl,
                                 __u64 offset, __u32 count)
{
        sector_t isect, end;
        struct pnfs_block_extent *be;
        struct pnfs_block_short_extent *se;

        dprintk("%s(%llu, %u)\n", __func__, offset, count);
        if (count == 0)
                return;
        isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
        end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
        end >>= SECTOR_SHIFT;
        while (isect < end) {
                sector_t len;

                be = bl_find_get_extent(bl, isect, NULL);
                BUG_ON(!be); /* FIXME */
                len = min(end, be->be_f_offset + be->be_length) - isect;
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        se = bl_pop_one_short_extent(be->be_inval);
                        BUG_ON(!se);
                        bl_mark_for_commit(be, isect, len, se);
                }
                isect += len;
                bl_put_extent(be);
        }
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                /* This is the zeroing page we added */
                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);

        if (unlikely(!uptodate)) {
                if (!wdata->pnfs_error)
                        wdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(wdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

        if (!uptodate) {
                if (!wdata->pnfs_error)
                        wdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(wdata->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_write_data *wdata;

        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        wdata = container_of(task, struct nfs_write_data, task);
        if (likely(!wdata->pnfs_error)) {
                /* Marks for LAYOUTCOMMIT */
                mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
                                     wdata->args.offset, wdata->args.count);
        }
        pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
        struct nfs_write_data *wdata = data;

        if (unlikely(wdata->pnfs_error)) {
                bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
                                      num_se);
        }
        wdata->task.tk_status = wdata->pnfs_error;
        wdata->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
        schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so it is not
 * used again.
 */
static void mark_bad_read(void)
{
        return;
}

/*
 * map_block: map a requested I/O block (isect) into an offset in the LVM
 * block device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
        dprintk("%s enter be=%p\n", __func__, be);

        set_buffer_mapped(bh);
        bh->b_bdev = be->be_mdev;
        bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
                (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

        dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
                __func__, (unsigned long long)isect, (long)bh->b_blocknr,
                bh->b_size);
        return;
}

/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
        struct buffer_head *bh = NULL;
        int ret = 0;
        sector_t isect;

        dprintk("%s enter, %p\n", __func__, page);
        BUG_ON(PageUptodate(page));
        if (!cow_read) {
                zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                goto cleanup;
        }

        bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
        if (!bh) {
                ret = -ENOMEM;
                goto cleanup;
        }

        isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
        map_block(bh, isect, cow_read);
        if (!bh_uptodate_or_lock(bh))
                ret = bh_submit_read(bh);
        if (ret)
                goto cleanup;
        SetPageUptodate(page);

cleanup:
        bl_put_extent(cow_read);
        if (bh)
                free_buffer_head(bh);
        if (ret) {
                /* Need to mark layout with bad read...should now
                 * just use nfs4 for reads and writes.
                 */
                mark_bad_read();
        }
        return ret;
}

/* Find or create a zeroing page marked as under writeback.
 * Return ERR_PTR on error, NULL to indicate this page should be skipped,
 * and the page itself to indicate write out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
                         struct pnfs_block_extent *cow_read)
{
        struct page *page;
        int locked = 0;

        page = find_get_page(inode->i_mapping, index);
        if (page)
                goto check_page;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (unlikely(!page)) {
                dprintk("%s oom\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
        locked = 1;

check_page:
        /* PageDirty: Other will write this out
         * PageWriteback: Other is writing this out
         * PageUptodate: It was read before
         */
        if (PageDirty(page) || PageWriteback(page)) {
                print_page(page);
                if (locked)
                        unlock_page(page);
                page_cache_release(page);
                return NULL;
        }

        if (!locked) {
                lock_page(page);
                locked = 1;
                goto check_page;
        }
        if (!PageUptodate(page)) {
                /* New page, read it in or zero it */
                init_page_for_write(page, cow_read);
        }
        set_page_writeback(page);
        unlock_page(page);
        return page;
}
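
/* Write path: writes must cover whole server-side blocks.  When the first
 * or last block of the request lies in an INVALID extent and is only
 * partially covered, the surrounding pages of that block are zeroed and
 * written out as well (fill_invalid_ext); the pages in the middle of the
 * request are then written extent by extent.  Any failure sets pnfs_error
 * so the write is redone through the MDS.
 */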
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
        int i, ret, npg_zero, pg_index, last = 0;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, last_isect = 0, extent_length = 0;
        struct parallel_io *par;
        loff_t offset = wdata->args.offset;
        size_t count = wdata->args.count;
        struct page **pages = wdata->args.pages;
        struct page *page;
        pgoff_t index;
        u64 temp;
        int npg_per_block =
            NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

        dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
        /* At this point, wdata->pages is a (sequential) list of nfs_pages.
         * We want to write each, and if there is an error set pnfs_error
         * to have it redone using nfs.
         */
        par = alloc_parallel(wdata);
        if (!par)
                goto out_mds;
        par->pnfs_callback = bl_end_par_io_write;
        /* At this point, have to be more careful with error handling */

        isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
        if (!be || !is_writable(be, isect)) {
                dprintk("%s no matching extents!\n", __func__);
                goto out_mds;
        }

        /* First page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                if (likely(!bl_push_one_short_extent(be->be_inval)))
                        par->bse_count++;
                else
                        goto out_mds;
                temp = offset >> PAGE_CACHE_SHIFT;
                npg_zero = do_div(temp, npg_per_block);
                isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
                                     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
                extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
                dprintk("%s need to zero %d pages\n", __func__, npg_zero);
                for (;npg_zero > 0; npg_zero--) {
                        if (bl_is_sector_init(be->be_inval, isect)) {
                                dprintk("isect %llu already init\n",
                                        (unsigned long long)isect);
                                goto next_page;
                        }
                        /* page ref released in bl_end_io_write_zero */
                        index = isect >> PAGE_CACHE_SECTOR_SHIFT;
                        dprintk("%s zero %dth page: index %lu isect %llu\n",
                                __func__, npg_zero, index,
                                (unsigned long long)isect);
                        page = bl_find_get_zeroing_page(wdata->inode, index,
                                                        cow_read);
                        if (unlikely(IS_ERR(page))) {
                                wdata->pnfs_error = PTR_ERR(page);
                                goto out;
                        } else if (page == NULL)
                                goto next_page;

                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                   PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                end_page_writeback(page);
                                page_cache_release(page);
                                wdata->pnfs_error = ret;
                                goto out;
                        }
                        if (likely(!bl_push_one_short_extent(be->be_inval)))
                                par->bse_count++;
                        else {
                                end_page_writeback(page);
                                page_cache_release(page);
                                wdata->pnfs_error = -ENOMEM;
                                goto out;
                        }
                        /* FIXME: This should be done in bi_end_io */
                        mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
                                             page->index << PAGE_CACHE_SHIFT,
                                             PAGE_CACHE_SIZE);

                        bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
                                                 isect, page, be,
                                                 bl_end_io_write_zero, par);
                        if (IS_ERR(bio)) {
                                wdata->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
next_page:
                        isect += PAGE_CACHE_SECTORS;
                        extent_length -= PAGE_CACHE_SECTORS;
                }
                if (last)
                        goto write_done;
        }
        bio = bl_submit_bio(WRITE, bio);

        /* Middle pages */
        pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
        for (i = pg_index; i < wdata->npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
                                                isect, NULL);
                        if (!be || !is_writable(be, isect)) {
                                wdata->pnfs_error = -EINVAL;
                                goto out;
                        }
                        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                                if (likely(!bl_push_one_short_extent(
                                                                be->be_inval)))
                                        par->bse_count++;
                                else {
                                        wdata->pnfs_error = -ENOMEM;
                                        goto out;
                                }
                        }
                        extent_length = be->be_length -
                                (isect - be->be_f_offset);
                }
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                   PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                wdata->pnfs_error = ret;
                                goto out;
                        }
                }
                bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
                                         isect, pages[i], be,
                                         bl_end_io_write, par);
                if (IS_ERR(bio)) {
                        wdata->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
                isect += PAGE_CACHE_SECTORS;
                last_isect = isect;
                extent_length -= PAGE_CACHE_SECTORS;
        }

        /* Last page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                bio = bl_submit_bio(WRITE, bio);
                temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
                npg_zero = npg_per_block - do_div(temp, npg_per_block);
                if (npg_zero < npg_per_block) {
                        last = 1;
                        goto fill_invalid_ext;
                }
        }

write_done:
        wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
        if (count < wdata->res.count) {
                wdata->res.count = count;
        }
out:
        bl_put_extent(be);
        bl_submit_bio(WRITE, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;
out_mds:
        bl_put_extent(be);
        kfree(par);
        return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
        int i;
        struct pnfs_block_extent *be;

        spin_lock(&bl->bl_ext_lock);
        for (i = 0; i < EXTENT_LISTS; i++) {
                while (!list_empty(&bl->bl_extents[i])) {
                        be = list_first_entry(&bl->bl_extents[i],
                                              struct pnfs_block_extent,
                                              be_node);
                        list_del(&be->be_node);
                        bl_put_extent(be);
                }
        }
        spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
        struct pnfs_inval_tracking *pos, *temp;
        struct pnfs_block_short_extent *se, *stemp;

        list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
                list_del(&pos->it_link);
                kfree(pos);
        }

        list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
                list_del(&se->bse_node);
                kfree(se);
        }
        return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

        dprintk("%s enter\n", __func__);
        release_extents(bl, NULL);
        release_inval_marks(&bl->bl_inval);
        kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
                                                   gfp_t gfp_flags)
{
        struct pnfs_block_layout *bl;

        dprintk("%s enter\n", __func__);
        bl = kzalloc(sizeof(*bl), gfp_flags);
        if (!bl)
                return NULL;
        spin_lock_init(&bl->bl_ext_lock);
        INIT_LIST_HEAD(&bl->bl_extents[0]);
        INIT_LIST_HEAD(&bl->bl_extents[1]);
        INIT_LIST_HEAD(&bl->bl_commit);
        INIT_LIST_HEAD(&bl->bl_committing);
        bl->bl_count = 0;
        bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
        BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
        return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
        dprintk("%s enter\n", __func__);
        kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
                                                 struct nfs4_layoutget_res *lgr,
                                                 gfp_t gfp_flags)
{
        struct pnfs_layout_segment *lseg;
        int status;

        dprintk("%s enter\n", __func__);
        lseg = kzalloc(sizeof(*lseg), gfp_flags);
        if (!lseg)
                return ERR_PTR(-ENOMEM);
        status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
        if (status) {
                /* We don't want to call the full-blown bl_free_lseg,
                 * since on error extents were not touched.
                 */
                kfree(lseg);
                return ERR_PTR(status);
        }
        return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
                       const struct nfs4_layoutcommit_args *arg)
{
        dprintk("%s enter\n", __func__);
        encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
        struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

        dprintk("%s enter\n", __func__);
        clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args,
                                      lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
        if (mid) {
                struct pnfs_block_dev *dev, *tmp;

                /* No need to take bm_lock as we are last user freeing bm_devlist */
                list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
                        list_del(&dev->bm_node);
                        bl_free_block_dev(dev);
                }
                kfree(mid);
        }
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
                        struct nfs4_deviceid *d_id)
{
        struct pnfs_device *dev;
        struct pnfs_block_dev *rv;
        u32 max_resp_sz;
        int max_pages;
        struct page **pages = NULL;
        int i, rc;

        /*
         * Use the session max response size as the basis for setting
         * GETDEVICEINFO's maxcount
         */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = max_resp_sz >> PAGE_SHIFT;
        dprintk("%s max_resp_sz %u max_pages %d\n",
                __func__, max_resp_sz, max_pages);

        dev = kmalloc(sizeof(*dev), GFP_NOFS);
        if (!dev) {
                dprintk("%s kmalloc failed\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
        if (pages == NULL) {
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < max_pages; i++) {
                pages[i] = alloc_page(GFP_NOFS);
                if (!pages[i]) {
                        rv = ERR_PTR(-ENOMEM);
                        goto out_free;
                }
        }

        memcpy(&dev->dev_id, d_id, sizeof(*d_id));
        dev->layout_type = LAYOUT_BLOCK_VOLUME;
        dev->pages = pages;
        dev->pgbase = 0;
        dev->pglen = PAGE_SIZE * max_pages;
        dev->mincount = 0;

        dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
        rc = nfs4_proc_getdeviceinfo(server, dev);
        dprintk("%s getdevice info returns %d\n", __func__, rc);
        if (rc) {
                rv = ERR_PTR(rc);
                goto out_free;
        }

        rv = nfs4_blk_decode_device(server, dev);
out_free:
        for (i = 0; i < max_pages; i++)
                __free_page(pages[i]);
        kfree(pages);
        kfree(dev);
        return rv;
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
        struct block_mount_id *b_mt_id = NULL;
        struct pnfs_devicelist *dlist = NULL;
        struct pnfs_block_dev *bdev;
        LIST_HEAD(block_disklist);
        int status, i;

        dprintk("%s enter\n", __func__);

        if (server->pnfs_blksize == 0) {
                dprintk("%s Server did not return blksize\n", __func__);
                return -EINVAL;
        }
        b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
        if (!b_mt_id) {
                status = -ENOMEM;
                goto out_error;
        }
        /* Initialize nfs4 block layout mount id */
        spin_lock_init(&b_mt_id->bm_lock);
        INIT_LIST_HEAD(&b_mt_id->bm_devlist);

        dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
        if (!dlist) {
                status = -ENOMEM;
                goto out_error;
        }
        dlist->eof = 0;
        while (!dlist->eof) {
                status = nfs4_proc_getdevicelist(server, fh, dlist);
                if (status)
                        goto out_error;
                dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
                        __func__, dlist->num_devs, dlist->eof);
                for (i = 0; i < dlist->num_devs; i++) {
                        bdev = nfs4_blk_get_deviceinfo(server, fh,
                                                       &dlist->dev_id[i]);
                        if (IS_ERR(bdev)) {
                                status = PTR_ERR(bdev);
                                goto out_error;
                        }
                        spin_lock(&b_mt_id->bm_lock);
                        list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
                        spin_unlock(&b_mt_id->bm_lock);
                }
        }
        dprintk("%s SUCCESS\n", __func__);
        server->pnfs_ld_data = b_mt_id;

out_return:
        kfree(dlist);
        return status;

out_error:
        free_blk_mountid(b_mt_id);
        goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
        struct block_mount_id *b_mt_id = server->pnfs_ld_data;

        dprintk("%s enter\n", __func__);
        free_blk_mountid(b_mt_id);
        dprintk("%s RETURNS\n", __func__);
        return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
        .pg_init = pnfs_generic_pg_init_read,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
        .pg_init = pnfs_generic_pg_init_write,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
        .id = LAYOUT_BLOCK_VOLUME,
        .name = "LAYOUT_BLOCK_VOLUME",
        .read_pagelist = bl_read_pagelist,
        .write_pagelist = bl_write_pagelist,
        .alloc_layout_hdr = bl_alloc_layout_hdr,
        .free_layout_hdr = bl_free_layout_hdr,
        .alloc_lseg = bl_alloc_lseg,
        .free_lseg = bl_free_lseg,
        .encode_layoutcommit = bl_encode_layoutcommit,
        .cleanup_layoutcommit = bl_cleanup_layoutcommit,
        .set_layoutdriver = bl_set_layoutdriver,
        .clear_layoutdriver = bl_clear_layoutdriver,
        .pg_read_ops = &bl_pg_read_ops,
        .pg_write_ops = &bl_pg_write_ops,
};
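
/* The "blocklayout" rpc_pipefs pipe is the upcall channel to the userspace
 * device-mapping daemon (blkmapd), which resolves the volume signatures
 * returned by GETDEVICEINFO into local block devices.
 */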
static const struct rpc_pipe_ops bl_upcall_ops = {
        .upcall         = rpc_pipe_generic_upcall,
        .downcall       = bl_pipe_downcall,
        .destroy_msg    = bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
                                                  struct rpc_pipe *pipe)
{
        struct dentry *dir, *dentry;

        dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
        if (dir == NULL)
                return ERR_PTR(-ENOENT);
        dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
        dput(dir);
        return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
                                          struct rpc_pipe *pipe)
{
        if (pipe->dentry)
                rpc_unlink(pipe->dentry);
}
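
/* Create or remove the pipe dentry as rpc_pipefs instances are mounted
 * and unmounted; the pipe data itself is per-network-namespace and
 * outlives any particular rpc_pipefs mount.
 */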
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct super_block *sb = ptr;
        struct net *net = sb->s_fs_info;
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;
        int ret = 0;

        if (!try_module_get(THIS_MODULE))
                return 0;

        if (nn->bl_device_pipe == NULL) {
                module_put(THIS_MODULE);
                return 0;
        }

        switch (event) {
        case RPC_PIPEFS_MOUNT:
                dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
                if (IS_ERR(dentry)) {
                        ret = PTR_ERR(dentry);
                        break;
                }
                nn->bl_device_pipe->dentry = dentry;
                break;
        case RPC_PIPEFS_UMOUNT:
                if (nn->bl_device_pipe->dentry)
                        nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
                break;
        default:
                ret = -ENOTSUPP;
                break;
        }
        module_put(THIS_MODULE);
        return ret;
}

static struct notifier_block nfs4blocklayout_block = {
        .notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
                                                   struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;
        struct dentry *dentry;

        pipefs_sb = rpc_get_sb_net(net);
        if (!pipefs_sb)
                return NULL;
        dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
        rpc_put_sb_net(net);
        return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
                                           struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;

        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
                nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
                rpc_put_sb_net(net);
        }
}

static int nfs4blocklayout_net_init(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;

        init_waitqueue_head(&nn->bl_wq);
        nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
        if (IS_ERR(nn->bl_device_pipe))
                return PTR_ERR(nn->bl_device_pipe);
        dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
        if (IS_ERR(dentry)) {
                rpc_destroy_pipe_data(nn->bl_device_pipe);
                return PTR_ERR(dentry);
        }
        nn->bl_device_pipe->dentry = dentry;
        return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);

        nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
        rpc_destroy_pipe_data(nn->bl_device_pipe);
        nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
        .init = nfs4blocklayout_net_init,
        .exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
        int ret;

        dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

        ret = pnfs_register_layoutdriver(&blocklayout_type);
        if (ret)
                goto out;

        ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
        if (ret)
                goto out_remove;
        ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
        if (ret)
                goto out_notifier;
out:
        return ret;

out_notifier:
        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
        pnfs_unregister_layoutdriver(&blocklayout_type);
        return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
        dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
                __func__);

        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
        unregister_pernet_subsys(&nfs4blocklayout_net_ops);
        pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);