/*
 * linux/fs/nfs/blocklayout/blocklayout.c
 *
 * Module for the NFSv4.1 pNFS block layout driver.
 *
 * Copyright (c) 2006 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@citi.umich.edu>
 * Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "../pnfs.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

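/* Debug helper: dump the state bits of @page via dprintk. */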
static void print_page(struct page *page)
{
        dprintk("PRINTPAGE page %p\n", page);
        dprintk(" PagePrivate %d\n", PagePrivate(page));
        dprintk(" PageUptodate %d\n", PageUptodate(page));
        dprintk(" PageError %d\n", PageError(page));
        dprintk(" PageDirty %d\n", PageDirty(page));
        dprintk(" PageReferenced %d\n", PageReferenced(page));
        dprintk(" PageLocked %d\n", PageLocked(page));
        dprintk(" PageWriteback %d\n", PageWriteback(page));
        dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
        dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
        if (be->be_state == PNFS_BLOCK_NONE_DATA)
                return 1;
        else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
                return 0;
        else
                return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
        return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
                be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
        struct kref refcnt;
        void (*pnfs_callback) (void *data, int num_se);
        void *data;
        int bse_count;
};

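/* A parallel_io starts with one reference held by the submitting thread.
 * bl_submit_bio() takes an extra reference for each in-flight bio, and
 * every bio completion drops one.  When the last reference goes away,
 * destroy_parallel() fires pnfs_callback with bse_count, the number of
 * short extents reserved along the way for LAYOUTCOMMIT bookkeeping.
 */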
static inline struct parallel_io *alloc_parallel(void *data)
{
        struct parallel_io *rv;

        rv = kmalloc(sizeof(*rv), GFP_NOFS);
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
                rv->bse_count = 0;
        }
        return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
        kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

        dprintk("%s enter\n", __func__);
        p->pnfs_callback(p->data, p->bse_count);
        kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
        kref_put(&p->refcnt, destroy_parallel);
}

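/* Submit a fully built bio.  The parallel_io reference taken here is
 * dropped by the bio's bi_end_io handler.  Always returns NULL so the
 * caller can simply reset its bio pointer.
 */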
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
                        rw == READ ? "read" : "write",
                        bio->bi_size, (unsigned long long)bio->bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
}

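/* Allocate a bio big enough for @npg pages (capped at BIO_MAX_PAGES) and
 * point it at the device sector backing @isect.  When called from
 * memory-reclaim context (PF_MEMALLOC), fall back to progressively
 * smaller bios rather than failing outright.
 */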
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
                                     struct pnfs_block_extent *be,
                                     void (*end_io)(struct bio *, int err),
                                     struct parallel_io *par)
{
        struct bio *bio;

        npg = min(npg, BIO_MAX_PAGES);
        bio = bio_alloc(GFP_NOIO, npg);
        if (!bio && (current->flags & PF_MEMALLOC)) {
                while (!bio && (npg /= 2))
                        bio = bio_alloc(GFP_NOIO, npg);
        }
        if (bio) {
                bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
                bio->bi_bdev = be->be_mdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
        return bio;
}

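/* Append @page to @bio, allocating a bio first if necessary.  If the
 * page does not fit (the bio is full or hits a device limit), submit
 * the current bio and retry with a fresh one.  Returns the bio to keep
 * using, or ERR_PTR(-ENOMEM) if no bio could be allocated.
 */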
static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
                                      struct parallel_io *par)
{
retry:
        if (!bio) {
                bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
        }
        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio = bl_submit_bio(rw, bio);
                goto retry;
        }
        return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                if (uptodate)
                        SetPageUptodate(page);
        } while (bvec >= bio->bi_io_vec);
        if (!uptodate) {
                struct nfs_read_data *rdata = par->data;
                struct nfs_pgio_header *header = rdata->header;

                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

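/* Bio completions can run in interrupt context, so the final read
 * completion is punted to process context: bl_end_par_io_read() fires
 * when the last bio finishes and queues bl_read_cleanup(), which calls
 * pnfs_ld_read_done() from a workqueue.
 */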
static void bl_read_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_read_data *rdata;

        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        rdata = container_of(task, struct nfs_read_data, task);
        pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
        struct nfs_read_data *rdata = data;

        rdata->task.tk_status = rdata->header->pnfs_error;
        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
        schedule_work(&rdata->task.u.tk_work);
}

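/* Return false if @offset or @len has bits set in @blkmask; callers use
 * this to route requests the block driver cannot handle back to the MDS.
 */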
static bool
bl_check_alignment(u64 offset, u32 len, unsigned long blkmask)
{
        if ((offset & blkmask) || (len & blkmask))
                return false;
        return true;
}

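/* Read path: walk the request's pages, looking up the covering extent
 * (and any copy-on-write read extent) whenever the current one is
 * exhausted.  Holes are zero-filled without touching the device;
 * everything else is batched into bios against the appropriate extent
 * and submitted.
 */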
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
        struct nfs_pgio_header *header = rdata->header;
        int i, hole;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, extent_length = 0;
        struct parallel_io *par;
        loff_t f_offset = rdata->args.offset;
        struct page **pages = rdata->args.pages;
        int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

        dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
                rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);

        if (!bl_check_alignment(f_offset, rdata->args.count, PAGE_CACHE_MASK))
                goto use_mds;

        par = alloc_parallel(rdata);
        if (!par)
                goto use_mds;
        par->pnfs_callback = bl_end_par_io_read;
        /* At this point, we can no longer jump to use_mds */

        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
        for (i = pg_index; i < rdata->pages.npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bl_put_extent(cow_read);
                        bio = bl_submit_bio(READ, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
                                                isect, &cow_read);
                        if (!be) {
                                header->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be->be_length -
                                (isect - be->be_f_offset);
                        if (cow_read) {
                                sector_t cow_length = cow_read->be_length -
                                        (isect - cow_read->be_f_offset);
                                extent_length = min(extent_length, cow_length);
                        }
                }
                hole = is_hole(be, isect);
                if (hole && !cow_read) {
                        bio = bl_submit_bio(READ, bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
                        print_page(pages[i]);
                        SetPageUptodate(pages[i]);
                } else {
                        struct pnfs_block_extent *be_read;

                        be_read = (hole && cow_read) ? cow_read : be;
                        bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
                                                 READ,
                                                 isect, pages[i], be_read,
                                                 bl_end_io_read, par);
                        if (IS_ERR(bio)) {
                                header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                }
                isect += PAGE_CACHE_SECTORS;
                extent_length -= PAGE_CACHE_SECTORS;
        }
        if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
                rdata->res.eof = 1;
                rdata->res.count = header->inode->i_size - f_offset;
        } else {
                rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
        }
out:
        bl_put_extent(be);
        bl_put_extent(cow_read);
        bl_submit_bio(READ, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;

use_mds:
        dprintk("Giving up and using normal NFS\n");
        return PNFS_NOT_ATTEMPTED;
}

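/* Convert the byte range just written into sector ranges and, for
 * extents in the INVALID state, attach a previously reserved short
 * extent to the layout's commit list so LAYOUTCOMMIT reports it.
 */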
static void mark_extents_written(struct pnfs_block_layout *bl,
                                 __u64 offset, __u32 count)
{
        sector_t isect, end;
        struct pnfs_block_extent *be;
        struct pnfs_block_short_extent *se;

        dprintk("%s(%llu, %u)\n", __func__, offset, count);
        if (count == 0)
                return;
        isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
        end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
        end >>= SECTOR_SHIFT;
        while (isect < end) {
                sector_t len;
                be = bl_find_get_extent(bl, isect, NULL);
                BUG_ON(!be); /* FIXME */
                len = min(end, be->be_f_offset + be->be_length) - isect;
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        se = bl_pop_one_short_extent(be->be_inval);
                        BUG_ON(!se);
                        bl_mark_for_commit(be, isect, len, se);
                }
                isect += len;
                bl_put_extent(be);
        }
}

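/* Completion for bios carrying the extra zeroing pages added around an
 * unaligned write into an INVALID extent.  End writeback and drop the
 * page-cache reference taken in bl_find_get_zeroing_page().
 */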
static void bl_end_io_write_zero(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
                /* This is the zeroing page we added */
                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);

        if (unlikely(!uptodate)) {
                struct nfs_write_data *data = par->data;
                struct nfs_pgio_header *header = data->header;

                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

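/* Completion for bios carrying the caller's data pages.  On error,
 * record pnfs_error so the write will be redone through the MDS.
 */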
static void bl_end_io_write(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nfs_write_data *data = par->data;
        struct nfs_pgio_header *header = data->header;

        if (!uptodate) {
                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_write_data *wdata;

        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        wdata = container_of(task, struct nfs_write_data, task);
        if (likely(!wdata->header->pnfs_error)) {
                /* Marks for LAYOUTCOMMIT */
                mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
                                     wdata->args.offset, wdata->args.count);
        }
        pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
        struct nfs_write_data *wdata = data;

        if (unlikely(wdata->header->pnfs_error)) {
                bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
                                      num_se);
        }

        wdata->task.tk_status = wdata->header->pnfs_error;
        wdata->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
        schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so is not
 * used again.
 */
static void mark_bad_read(void)
{
        return;
}

/*
 * map_block: map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
        dprintk("%s enter be=%p\n", __func__, be);

        set_buffer_mapped(bh);
        bh->b_bdev = be->be_mdev;
        bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
                (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

        dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
                __func__, (unsigned long long)isect, (long)bh->b_blocknr,
                bh->b_size);
        return;
}

/* Given an unmapped page, zero it or read in page for COW, page is locked
 * by caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
        struct buffer_head *bh = NULL;
        int ret = 0;
        sector_t isect;

        dprintk("%s enter, %p\n", __func__, page);
        BUG_ON(PageUptodate(page));
        if (!cow_read) {
                zero_user_segment(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                goto cleanup;
        }

        bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
        if (!bh) {
                ret = -ENOMEM;
                goto cleanup;
        }

        isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
        map_block(bh, isect, cow_read);
        if (!bh_uptodate_or_lock(bh))
                ret = bh_submit_read(bh);
        if (ret)
                goto cleanup;
        SetPageUptodate(page);

cleanup:
        bl_put_extent(cow_read);
        if (bh)
                free_buffer_head(bh);
        if (ret) {
                /* Need to mark layout with bad read...should now
                 * just use nfs4 for reads and writes.
                 */
                mark_bad_read();
        }
        return ret;
}

/* Find or create a zeroing page marked as being under writeback.
 * Return ERR_PTR on error, NULL to indicate skip this page, and the page
 * itself to indicate write out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
                         struct pnfs_block_extent *cow_read)
{
        struct page *page;
        int locked = 0;

        page = find_get_page(inode->i_mapping, index);
        if (page)
                goto check_page;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (unlikely(!page)) {
                dprintk("%s oom\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
        locked = 1;

check_page:
        /* PageDirty: Other will write this out
         * PageWriteback: Other is writing this out
         * PageUptodate: It was read before
         */
        if (PageDirty(page) || PageWriteback(page)) {
                print_page(page);
                if (locked)
                        unlock_page(page);
                page_cache_release(page);
                return NULL;
        }

        if (!locked) {
                lock_page(page);
                locked = 1;
                goto check_page;
        }
        if (!PageUptodate(page)) {
                /* New page, read it in or zero it */
                init_page_for_write(page, cow_read);
        }
        set_page_writeback(page);
        unlock_page(page);
        return page;
}

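/* Write path.  After the alignment check, three phases are handled:
 * pages in front of the request that share its first INVALID block are
 * found (or created) and zeroed; the request's own pages are written,
 * marking sectors initialized as needed; and trailing pages of the
 * final INVALID block are zeroed by jumping back to fill_invalid_ext
 * with last set.  Short extents are reserved before each piece of I/O
 * so completion can extend the commit list without allocating.
 */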
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
        struct nfs_pgio_header *header = wdata->header;
        int i, ret, npg_zero, pg_index, last = 0;
        struct bio *bio = NULL;
        struct pnfs_block_extent *be = NULL, *cow_read = NULL;
        sector_t isect, last_isect = 0, extent_length = 0;
        struct parallel_io *par = NULL;
        loff_t offset = wdata->args.offset;
        size_t count = wdata->args.count;
        struct page **pages = wdata->args.pages;
        struct page *page;
        pgoff_t index;
        u64 temp;
        int npg_per_block =
            NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

        dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
        /* Check for alignment first */
        if (!bl_check_alignment(offset, count, PAGE_CACHE_MASK))
                goto out_mds;

        /* At this point, wdata->pages is a (sequential) list of nfs_pages.
         * We want to write each, and if there is an error set pnfs_error
         * to have it redone using nfs.
         */
        par = alloc_parallel(wdata);
        if (!par)
                goto out_mds;
        par->pnfs_callback = bl_end_par_io_write;
        /* At this point, have to be more careful with error handling */

        isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
        be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
        if (!be || !is_writable(be, isect)) {
                dprintk("%s no matching extents!\n", __func__);
                goto out_mds;
        }

        /* First page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                if (likely(!bl_push_one_short_extent(be->be_inval)))
                        par->bse_count++;
                else
                        goto out_mds;
                temp = offset >> PAGE_CACHE_SHIFT;
                npg_zero = do_div(temp, npg_per_block);
                isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
                                     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
                extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
                dprintk("%s need to zero %d pages\n", __func__, npg_zero);
                for (; npg_zero > 0; npg_zero--) {
                        if (bl_is_sector_init(be->be_inval, isect)) {
                                dprintk("isect %llu already init\n",
                                        (unsigned long long)isect);
                                goto next_page;
                        }
                        /* page ref released in bl_end_io_write_zero */
                        index = isect >> PAGE_CACHE_SECTOR_SHIFT;
                        dprintk("%s zero %dth page: index %lu isect %llu\n",
                                __func__, npg_zero, index,
                                (unsigned long long)isect);
                        page = bl_find_get_zeroing_page(header->inode, index,
                                                        cow_read);
                        if (unlikely(IS_ERR(page))) {
                                header->pnfs_error = PTR_ERR(page);
                                goto out;
                        } else if (page == NULL)
                                goto next_page;

                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                   PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                end_page_writeback(page);
                                page_cache_release(page);
                                header->pnfs_error = ret;
                                goto out;
                        }
                        if (likely(!bl_push_one_short_extent(be->be_inval)))
                                par->bse_count++;
                        else {
                                end_page_writeback(page);
                                page_cache_release(page);
                                header->pnfs_error = -ENOMEM;
                                goto out;
                        }
                        /* FIXME: This should be done in bi_end_io */
                        mark_extents_written(BLK_LSEG2EXT(header->lseg),
                                             page->index << PAGE_CACHE_SHIFT,
                                             PAGE_CACHE_SIZE);

                        bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
                                                 isect, page, be,
                                                 bl_end_io_write_zero, par);
                        if (IS_ERR(bio)) {
                                header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
next_page:
                        isect += PAGE_CACHE_SECTORS;
                        extent_length -= PAGE_CACHE_SECTORS;
                }
                if (last)
                        goto write_done;
        }
        bio = bl_submit_bio(WRITE, bio);

        /* Middle pages */
        pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
        for (i = pg_index; i < wdata->pages.npages; i++) {
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
                                                isect, NULL);
                        if (!be || !is_writable(be, isect)) {
                                header->pnfs_error = -EINVAL;
                                goto out;
                        }
                        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                                if (likely(!bl_push_one_short_extent(
                                                                be->be_inval)))
                                        par->bse_count++;
                                else {
                                        header->pnfs_error = -ENOMEM;
                                        goto out;
                                }
                        }
                        extent_length = be->be_length -
                            (isect - be->be_f_offset);
                }
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                   PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
                                header->pnfs_error = ret;
                                goto out;
                        }
                }
                bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
                                         isect, pages[i], be,
                                         bl_end_io_write, par);
                if (IS_ERR(bio)) {
                        header->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
                isect += PAGE_CACHE_SECTORS;
                last_isect = isect;
                extent_length -= PAGE_CACHE_SECTORS;
        }

        /* Last page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                bio = bl_submit_bio(WRITE, bio);
                temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
                npg_zero = npg_per_block - do_div(temp, npg_per_block);
                if (npg_zero < npg_per_block) {
                        last = 1;
                        goto fill_invalid_ext;
                }
        }

write_done:
        wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
        if (count < wdata->res.count) {
                wdata->res.count = count;
        }
out:
        bl_put_extent(be);
        bl_submit_bio(WRITE, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;
out_mds:
        bl_put_extent(be);
        kfree(par);
        return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
        int i;
        struct pnfs_block_extent *be;

        spin_lock(&bl->bl_ext_lock);
        for (i = 0; i < EXTENT_LISTS; i++) {
                while (!list_empty(&bl->bl_extents[i])) {
                        be = list_first_entry(&bl->bl_extents[i],
                                              struct pnfs_block_extent,
                                              be_node);
                        list_del(&be->be_node);
                        bl_put_extent(be);
                }
        }
        spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
        struct pnfs_inval_tracking *pos, *temp;
        struct pnfs_block_short_extent *se, *stemp;

        list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
                list_del(&pos->it_link);
                kfree(pos);
        }

        list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
                list_del(&se->bse_node);
                kfree(se);
        }
        return;
}

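/* Layout header alloc/free.  The header embeds the block layout state:
 * the per-layout extent lists, the commit lists, and the invalid-sector
 * tracking, all sized from the server's advertised pnfs_blksize.
 */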
static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

        dprintk("%s enter\n", __func__);
        release_extents(bl, NULL);
        release_inval_marks(&bl->bl_inval);
        kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
                                                   gfp_t gfp_flags)
{
        struct pnfs_block_layout *bl;

        dprintk("%s enter\n", __func__);
        bl = kzalloc(sizeof(*bl), gfp_flags);
        if (!bl)
                return NULL;
        spin_lock_init(&bl->bl_ext_lock);
        INIT_LIST_HEAD(&bl->bl_extents[0]);
        INIT_LIST_HEAD(&bl->bl_extents[1]);
        INIT_LIST_HEAD(&bl->bl_commit);
        INIT_LIST_HEAD(&bl->bl_committing);
        bl->bl_count = 0;
        bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
        BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
        return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
        dprintk("%s enter\n", __func__);
        kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
                                                 struct nfs4_layoutget_res *lgr,
                                                 gfp_t gfp_flags)
{
        struct pnfs_layout_segment *lseg;
        int status;

        dprintk("%s enter\n", __func__);
        lseg = kzalloc(sizeof(*lseg), gfp_flags);
        if (!lseg)
                return ERR_PTR(-ENOMEM);
        status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
        if (status) {
                /* We don't want to call the full-blown bl_free_lseg,
                 * since on error extents were not touched.
                 */
                kfree(lseg);
                return ERR_PTR(status);
        }
        return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
                       const struct nfs4_layoutcommit_args *arg)
{
        dprintk("%s enter\n", __func__);
        encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
        struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

        dprintk("%s enter\n", __func__);
        clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
        if (mid) {
                struct pnfs_block_dev *dev, *tmp;

                /* No need to take bm_lock as we are last user freeing bm_devlist */
                list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
                        list_del(&dev->bm_node);
                        bl_free_block_dev(dev);
                }
                kfree(mid);
        }
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
                        struct nfs4_deviceid *d_id)
{
        struct pnfs_device *dev;
        struct pnfs_block_dev *rv;
        u32 max_resp_sz;
        int max_pages;
        struct page **pages = NULL;
        int i, rc;

        /*
         * Use the session max response size as the basis for setting
         * GETDEVICEINFO's maxcount
         */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = nfs_page_array_len(0, max_resp_sz);
        dprintk("%s max_resp_sz %u max_pages %d\n",
                __func__, max_resp_sz, max_pages);

        dev = kmalloc(sizeof(*dev), GFP_NOFS);
        if (!dev) {
                dprintk("%s kmalloc failed\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
        if (pages == NULL) {
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < max_pages; i++) {
                pages[i] = alloc_page(GFP_NOFS);
                if (!pages[i]) {
                        rv = ERR_PTR(-ENOMEM);
                        goto out_free;
                }
        }

        memcpy(&dev->dev_id, d_id, sizeof(*d_id));
        dev->layout_type = LAYOUT_BLOCK_VOLUME;
        dev->pages = pages;
        dev->pgbase = 0;
        dev->pglen = PAGE_SIZE * max_pages;
        dev->mincount = 0;

        dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
        rc = nfs4_proc_getdeviceinfo(server, dev);
        dprintk("%s getdevice info returns %d\n", __func__, rc);
        if (rc) {
                rv = ERR_PTR(rc);
                goto out_free;
        }

        rv = nfs4_blk_decode_device(server, dev);
out_free:
        /* Guard against a partially populated array: if alloc_page()
         * failed above, the tail entries are still NULL.
         */
        for (i = 0; i < max_pages; i++)
                if (pages[i])
                        __free_page(pages[i]);
        kfree(pages);
        kfree(dev);
        return rv;
}

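/* Mount-time setup.  Requires the server to advertise a pNFS block size,
 * then walks GETDEVICELIST (over several calls until eof), fetching and
 * decoding GETDEVICEINFO for each device id and linking the result into
 * the per-mount device list.
 */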
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
        struct block_mount_id *b_mt_id = NULL;
        struct pnfs_devicelist *dlist = NULL;
        struct pnfs_block_dev *bdev;
        LIST_HEAD(block_disklist);
        int status, i;

        dprintk("%s enter\n", __func__);

        if (server->pnfs_blksize == 0) {
                dprintk("%s Server did not return blksize\n", __func__);
                return -EINVAL;
        }
        b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
        if (!b_mt_id) {
                status = -ENOMEM;
                goto out_error;
        }
        /* Initialize nfs4 block layout mount id */
        spin_lock_init(&b_mt_id->bm_lock);
        INIT_LIST_HEAD(&b_mt_id->bm_devlist);

        dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
        if (!dlist) {
                status = -ENOMEM;
                goto out_error;
        }
        dlist->eof = 0;
        while (!dlist->eof) {
                status = nfs4_proc_getdevicelist(server, fh, dlist);
                if (status)
                        goto out_error;
                dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
                        __func__, dlist->num_devs, dlist->eof);
                for (i = 0; i < dlist->num_devs; i++) {
                        bdev = nfs4_blk_get_deviceinfo(server, fh,
                                                       &dlist->dev_id[i]);
                        if (IS_ERR(bdev)) {
                                status = PTR_ERR(bdev);
                                goto out_error;
                        }
                        spin_lock(&b_mt_id->bm_lock);
                        list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
                        spin_unlock(&b_mt_id->bm_lock);
                }
        }
        dprintk("%s SUCCESS\n", __func__);
        server->pnfs_ld_data = b_mt_id;

out_return:
        kfree(dlist);
        return status;

out_error:
        free_blk_mountid(b_mt_id);
        goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
        struct block_mount_id *b_mt_id = server->pnfs_ld_data;

        dprintk("%s enter\n", __func__);
        free_blk_mountid(b_mt_id);
        dprintk("%s RETURNS\n", __func__);
        return 0;
}

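/* Pageio hooks: requests that fail the alignment check never enter the
 * block layout path; they are reset to go through the MDS instead.
 */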
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        if (!bl_check_alignment(req->wb_offset, req->wb_bytes, PAGE_CACHE_MASK))
                nfs_pageio_reset_read_mds(pgio);
        else
                pnfs_generic_pg_init_read(pgio, req);
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        if (!bl_check_alignment(req->wb_offset, req->wb_bytes, PAGE_CACHE_MASK))
                nfs_pageio_reset_write_mds(pgio);
        else
                pnfs_generic_pg_init_write(pgio, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
        .pg_init = bl_pg_init_read,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
        .pg_init = bl_pg_init_write,
        .pg_test = pnfs_generic_pg_test,
        .pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
        .id = LAYOUT_BLOCK_VOLUME,
        .name = "LAYOUT_BLOCK_VOLUME",
        .read_pagelist = bl_read_pagelist,
        .write_pagelist = bl_write_pagelist,
        .alloc_layout_hdr = bl_alloc_layout_hdr,
        .free_layout_hdr = bl_free_layout_hdr,
        .alloc_lseg = bl_alloc_lseg,
        .free_lseg = bl_free_lseg,
        .encode_layoutcommit = bl_encode_layoutcommit,
        .cleanup_layoutcommit = bl_cleanup_layoutcommit,
        .set_layoutdriver = bl_set_layoutdriver,
        .clear_layoutdriver = bl_clear_layoutdriver,
        .pg_read_ops = &bl_pg_read_ops,
        .pg_write_ops = &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
        .upcall = rpc_pipe_generic_upcall,
        .downcall = bl_pipe_downcall,
        .destroy_msg = bl_pipe_destroy_msg,
};

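/* The driver resolves device signatures to local block devices through a
 * userspace helper.  The helpers below create and remove the
 * "blocklayout" rpc_pipefs pipe used for that upcall.
 */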
static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
                                                  struct rpc_pipe *pipe)
{
        struct dentry *dir, *dentry;

        dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
        if (dir == NULL)
                return ERR_PTR(-ENOENT);
        dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
        dput(dir);
        return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
                                          struct rpc_pipe *pipe)
{
        if (pipe->dentry)
                rpc_unlink(pipe->dentry);
}

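/* rpc_pipefs can be mounted and unmounted independently of this module,
 * so a notifier (re)creates or removes the pipe dentry as the pseudo
 * filesystem comes and goes.
 */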
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
                            void *ptr)
{
        struct super_block *sb = ptr;
        struct net *net = sb->s_fs_info;
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;
        int ret = 0;

        if (!try_module_get(THIS_MODULE))
                return 0;

        if (nn->bl_device_pipe == NULL) {
                module_put(THIS_MODULE);
                return 0;
        }

        switch (event) {
        case RPC_PIPEFS_MOUNT:
                dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
                if (IS_ERR(dentry)) {
                        ret = PTR_ERR(dentry);
                        break;
                }
                nn->bl_device_pipe->dentry = dentry;
                break;
        case RPC_PIPEFS_UMOUNT:
                if (nn->bl_device_pipe->dentry)
                        nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
                break;
        default:
                ret = -ENOTSUPP;
                break;
        }
        module_put(THIS_MODULE);
        return ret;
}

static struct notifier_block nfs4blocklayout_block = {
        .notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
                                                   struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;
        struct dentry *dentry;

        pipefs_sb = rpc_get_sb_net(net);
        if (!pipefs_sb)
                return NULL;
        dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
        rpc_put_sb_net(net);
        return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
                                           struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;

        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
                nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
                rpc_put_sb_net(net);
        }
}

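/* Per-network-namespace state: each namespace gets its own upcall pipe
 * and wait queue, created at namespace init and torn down at exit.
 */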
static int nfs4blocklayout_net_init(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;

        init_waitqueue_head(&nn->bl_wq);
        nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
        if (IS_ERR(nn->bl_device_pipe))
                return PTR_ERR(nn->bl_device_pipe);
        dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
        if (IS_ERR(dentry)) {
                rpc_destroy_pipe_data(nn->bl_device_pipe);
                return PTR_ERR(dentry);
        }
        nn->bl_device_pipe->dentry = dentry;
        return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);

        nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
        rpc_destroy_pipe_data(nn->bl_device_pipe);
        nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
        .init = nfs4blocklayout_net_init,
        .exit = nfs4blocklayout_net_exit,
};

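/* Module init registers the layout driver, the rpc_pipefs notifier, and
 * the per-net operations, unwinding in reverse order on failure; exit
 * tears all three down.
 */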
static int __init nfs4blocklayout_init(void)
{
        int ret;

        dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

        ret = pnfs_register_layoutdriver(&blocklayout_type);
        if (ret)
                goto out;

        ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
        if (ret)
                goto out_remove;
        ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
        if (ret)
                goto out_notifier;
out:
        return ret;

out_notifier:
        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
        pnfs_unregister_layoutdriver(&blocklayout_type);
        return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
        dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
                __func__);

        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
        unregister_pernet_subsys(&nfs4blocklayout_net_ops);
        pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);