blocklayout.c

/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

struct dentry *bl_device_pipe;
wait_queue_head_t bl_wq;

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}
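
/* For reference: these be_state values mirror the four extent states of
 * RFC 5663.  READWRITE_DATA and READ_DATA are valid on-disk data;
 * INVALID_DATA is backed by storage but must be initialized before it may
 * be read back; NONE_DATA is a hole with no backing storage at all.
 */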

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data, int num_se);
	void *data;
	int bse_count;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
		rv->bse_count = 0;
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
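
/* Refcounting scheme, as the helpers above imply: alloc_parallel() starts
 * the kref at 1 (held by the submitting thread), bl_submit_bio() takes one
 * additional reference per bio actually submitted, and each bio's end_io
 * handler drops its reference.  The submitter drops the initial reference
 * once it is done issuing I/O, so whichever put_parallel() runs last fires
 * pnfs_callback exactly once, regardless of completion order.
 */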

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}
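
/* Note on the allocation fallback below: when the caller is a memory
 * reclaim thread (PF_MEMALLOC), a failed bio_alloc() is retried with the
 * requested page count halved each time, so writeout can still make
 * forward progress with a smaller bio rather than failing outright.
 */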

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}
	if (bio) {
		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!rdata->pnfs_error)
			rdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(rdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_read_data *rdata = data;

	rdata->task.tk_status = rdata->pnfs_error;
	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}
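
/* Read path, in outline: walk the page list, mapping each page through the
 * layout's extent list.  Pages backed by a hole (and with no copy-on-write
 * read extent) are zero-filled locally without touching the device; all
 * other pages are added to a bio aimed at the extent's block device.  A
 * new bio is started whenever the current extent is exhausted, and the
 * parallel_io machinery above collects the completions.
 */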

static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t count = rdata->args.count;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

	dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
		rdata->npages, f_offset, count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
					     isect, &cow_read);
			if (!be) {
				rdata->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par);
			if (IS_ERR(bio)) {
				rdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = rdata->inode->i_size - f_offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}
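
/* The alignment arithmetic in mark_extents_written() below widens the byte
 * range to whole pages and converts it to 512-byte sectors: the start is
 * rounded down to a page boundary and the end rounded up.  For example,
 * with 4 KB pages, offset=6000 count=100 gives isect = (6000 & ~4095) >> 9
 * = 8 and end = ((6100 + 4095) & ~4095) >> 9 = 16, i.e. sectors 8-15.
 */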

static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;
	struct pnfs_block_short_extent *se;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;

		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			se = bl_pop_one_short_extent(be->be_inval);
			BUG_ON(!se);
			bl_mark_for_commit(be, isect, len, se);
		}
		isect += len;
		bl_put_extent(be);
	}
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	if (unlikely(!uptodate)) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (likely(!wdata->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_write_data *wdata = data;

	if (unlikely(wdata->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
				      num_se);
	}
	wdata->task.tk_status = wdata->pnfs_error;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}
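
/* committed is set to NFS_FILE_SYNC above because the block layout writes
 * data directly to the storage device, so there is no separate COMMIT to a
 * data server; only LAYOUTCOMMIT to the MDS remains.  On error, the short
 * extents reserved for the commit list (num_se of them) are released,
 * since they will never be consumed by mark_extents_written().
 */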

/* FIXME STUB - mark intersection of layout and page as bad, so it is not
 * used again.
 */
static void mark_bad_read(void)
{
	return;
}
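
/* The mapping below first translates the file-relative sector isect to a
 * volume-relative sector, isect - be_f_offset + be_v_offset, then shifts
 * by (i_blkbits - SECTOR_SHIFT) to convert from 512-byte sectors to the
 * block size used by the underlying device's inode.
 */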

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	bl_put_extent(cow_read);
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

/* Find or create a zeroing page marked as being under writeback.
 * Return ERR_PTR on error, NULL to indicate that the page should be
 * skipped, and the page itself to indicate that it should be written out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
			 struct pnfs_block_extent *cow_read)
{
	struct page *page;
	int locked = 0;

	page = find_get_page(inode->i_mapping, index);
	if (page)
		goto check_page;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (unlikely(!page)) {
		dprintk("%s oom\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	locked = 1;

check_page:
	/* PageDirty: Someone else will write this out
	 * PageWriteback: Someone else is writing this out
	 * PageUptodate: It was read before
	 */
	if (PageDirty(page) || PageWriteback(page)) {
		print_page(page);
		if (locked)
			unlock_page(page);
		page_cache_release(page);
		return NULL;
	}

	if (!locked) {
		lock_page(page);
		locked = 1;
		goto check_page;
	}
	if (!PageUptodate(page)) {
		/* New page: read it in or zero it */
		init_page_for_write(page, cow_read);
	}
	set_page_writeback(page);
	unlock_page(page);
	return page;
}
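
/* Write path, in outline: bl_write_pagelist() below proceeds in three
 * phases.  If the write starts inside an INVALID extent, the leading pages
 * of that server-side block which the caller is not writing are zero-filled
 * first (the fill_invalid_ext loop), so the whole block is initialized
 * before it is exposed.  The middle phase writes the caller's own pages.
 * Finally, if the write ends inside an INVALID extent, the same zeroing
 * loop is re-entered with last set to fill out the trailing block.
 */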

static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * so that the request is redone through the MDS.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		goto out_mds;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (likely(!bl_push_one_short_extent(be->be_inval)))
			par->bse_count++;
		else
			goto out_mds;
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = bl_find_get_zeroing_page(wdata->inode, index,
							cow_read);
			if (unlikely(IS_ERR(page))) {
				wdata->pnfs_error = PTR_ERR(page);
				goto out;
			} else if (page == NULL)
				goto next_page;

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = ret;
				goto out;
			}
			if (likely(!bl_push_one_short_extent(be->be_inval)))
				par->bse_count++;
			else {
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = -ENOMEM;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);

			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				wdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
					     isect, NULL);
			if (!be || !is_writable(be, isect)) {
				wdata->pnfs_error = -EINVAL;
				goto out;
			}
			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
				if (likely(!bl_push_one_short_extent(
								be->be_inval)))
					par->bse_count++;
				else {
					wdata->pnfs_error = -ENOMEM;
					goto out;
				}
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				wdata->pnfs_error = ret;
				goto out;
			}
		}
		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par);
		if (IS_ERR(bio)) {
			wdata->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
	if (count < wdata->res.count) {
		wdata->res.count = count;
	}
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	bl_put_extent(be);
	kfree(par);
	return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;
	struct pnfs_block_short_extent *se, *stemp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}

	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
		list_del(&se->bse_node);
		kfree(se);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev, *tmp;

		/* No need to take bm_lock as we are last user freeing bm_devlist */
		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
out_free:
	/* pages[] was zeroed by kzalloc, so skip slots whose alloc_page
	 * never ran or failed */
	for (i = 0; i < max_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	kfree(dev);
	return rv;
}
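
/* Mount-time device discovery: bl_set_layoutdriver() below loops on
 * GETDEVICELIST until the server sets eof, resolving each returned
 * deviceid to a local block device via nfs4_blk_get_deviceinfo() and
 * collecting the results on the per-mount bm_devlist.
 */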

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

out_return:
	kfree(dlist);
	return status;

out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = pnfs_generic_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = pnfs_generic_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};
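
/* Module setup: nfs4blocklayout_init() below registers blocklayout_type
 * with the generic pNFS layer, then creates the "blocklayout" pipe in
 * rpc_pipefs.  The pipe carries device-resolution upcalls to a userspace
 * daemon (blkmapd, from nfs-utils), whose replies arrive through
 * bl_pipe_downcall.
 */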

static int __init nfs4blocklayout_init(void)
{
	struct vfsmount *mnt;
	struct path path;
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	init_waitqueue_head(&bl_wq);

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		ret = PTR_ERR(mnt);
		goto out_remove;
	}
	ret = vfs_path_lookup(mnt->mnt_root,
			      mnt,
			      NFS_PIPE_DIRNAME, 0, &path);
	if (ret)
		goto out_putrpc;

	bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
				    &bl_upcall_ops, 0);
	path_put(&path);
	if (IS_ERR(bl_device_pipe)) {
		ret = PTR_ERR(bl_device_pipe);
		goto out_putrpc;
	}
out:
	return ret;

out_putrpc:
	rpc_put_mount();
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	pnfs_unregister_layoutdriver(&blocklayout_type);
	rpc_unlink(bl_device_pipe);
	rpc_put_mount();
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);