inode.c

/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 * from
 * linux/fs/minix/inode.c
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <scsi/scsi_device.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

enum { BIO_MAX_PAGES_KMALLOC =
	(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
};

struct page_collect {
	struct exofs_sb_info *sbi;
	struct request_queue *req_q;
	struct inode *inode;
	unsigned expected_pages;
	struct exofs_io_state *ios;

	struct bio *bio;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
};

static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	/* Create master bios on first Q, later on cloning, each clone will be
	 * allocated on its destination Q
	 */
	pcol->req_q = osd_request_queue(sbi->s_ods[0]);
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->bio = NULL;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
}

static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->bio = NULL;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = BIO_MAX_PAGES_KMALLOC;
}

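/* Allocate the io_state on first use, then a bio sized for as many of the
 * expected pages as possible, halving the request until bio_kmalloc()
 * succeeds.
 */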
static int pcol_try_alloc(struct page_collect *pcol)
{
	int pages = min_t(unsigned, pcol->expected_pages,
			  BIO_MAX_PAGES_KMALLOC);

	if (!pcol->ios) { /* First time allocate io_state */
		int ret = exofs_get_io_state(pcol->sbi, &pcol->ios);

		if (ret)
			return ret;
	}

	for (; pages; pages >>= 1) {
		pcol->bio = bio_kmalloc(GFP_KERNEL, pages);
		if (likely(pcol->bio))
			return 0;
	}

	EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}

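/* Release the bio and io_state held by this collection, if any. */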
static void pcol_free(struct page_collect *pcol)
{
	if (pcol->bio) {
		bio_put(pcol->bio);
		pcol->bio = NULL;
	}

	if (pcol->ios) {
		exofs_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}

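/* Append one page to the collection's bio. A short add means the bio (or the
 * queue limits) is full, so return -ENOMEM and let the caller split the
 * request.
 */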
static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	int added_len = bio_add_pc_page(pcol->req_q, pcol->bio, page, len, 0);

	if (unlikely(len != added_len))
		return -ENOMEM;

	++pcol->nr_pages;
	pcol->length += len;
	return 0;
}

static int update_read_page(struct page *page, int ret)
{
	if (ret == 0) {
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
	} else if (ret == -EFAULT) {
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0; /* recovered error */
		EXOFS_DBGMSG("recovered read error\n");
	} else /* Error */
		SetPageError(page);

	return ret;
}

static void update_write_page(struct page *page, int ret)
{
	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol, bool do_unlock)
{
	struct bio_vec *bvec;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = exofs_check_io(pcol->ios, &resid);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2(" readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (do_unlock)
			unlock_page(page);
		length += bvec->bv_len;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG("readpages_done END\n");
	return ret;
}

/* callback of async reads */
static void readpages_done(struct exofs_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol, true);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}

static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	struct bio_vec *bvec;
	int i;

	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}

static int read_exec(struct page_collect *pcol, bool is_sync)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct exofs_io_state *ios = pcol->ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->bio)
		return 0;

	/* see comment in _readpage() about sync reads */
	WARN_ON(is_sync && (pcol->nr_pages != 1));

	ios->bio = pcol->bio;
	ios->length = pcol->length;
	ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;

	if (is_sync) {
		exofs_oi_read(oi, pcol->ios);
		return __readpages_done(pcol, false);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;
	ret = exofs_oi_read(oi, ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
		     ios->obj.id, _LLU(ios->offset), pcol->length);

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	if (!is_sync)
		_unlock_pcol_pages(pcol, ret, READ);
	pcol_free(pcol);
	kfree(pcol_copy);
	return ret;
}

/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous segment
 * and will start a new collection. Eventually the caller must submit the last
 * segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
			     " splitting\n", inode->i_ino, page->index);

		return read_exec(pcol, false);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
			    page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol, false);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->bio) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol, false);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}

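/* ->readpages: stream the page list through readpage_strip() and submit
 * whatever is left collected in the last segment.
 */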
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol, false);
}

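/* Common helper for exofs_readpage() and the read-modify-write case of
 * exofs_write_begin(). With a single page readpage_strip() never submits by
 * itself, so the final read_exec() below is the one that honours is_sync.
 */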
static int _readpage(struct page *page, bool is_sync)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	/* readpage_strip might call read_exec(,is_sync==false) at several
	 * places but not if we have a single page.
	 */
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol, is_sync);
}

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}

/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct exofs_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	struct bio_vec *bvec;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = exofs_check_io(ios, &resid);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += bvec->bv_len;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG("writepages_done END\n");
}

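/* Submit the collected pages as one asynchronous OSD write. Ownership of the
 * pages moves to pcol_copy, which writepages_done() will free.
 */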
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct exofs_io_state *ios = pcol->ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->bio)
		return 0;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */

	ios->bio = pcol_copy->bio;
	ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
	ios->length = pcol_copy->length;
	ios->done = writepages_done;
	ios->private = pcol_copy;

	ret = exofs_oi_write(oi, ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: exofs_oi_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
		     pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
		     pcol->length);
	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	pcol_free(pcol);
	kfree(pcol_copy);
	return ret;
}

/* writepage_strip is called either directly from writepage() or by the VFS from
 * within write_cache_pages(), to add one more page to be written to storage.
 * It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually the caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
			    page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->bio) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2(" writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("Failed pcol_add_page "
			     "nr_pages=%u total_length=0x%lx\n",
			     pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}

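/* ->writepages: estimate expected_pages from the writeback range, then let
 * write_cache_pages() feed each dirty page to writepage_strip().
 */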
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
		     mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}

static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}

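/* If the page is not uptodate and only part of it will be written, read it in
 * synchronously first (read-modify-write).
 */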
int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			return ret;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("__readpage_filler failed\n");
		}
	}

	return ret;
}

static int exofs_write_begin_export(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}

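/* Generic write_end, plus marking the inode dirty when i_size changed so the
 * new size is eventually written back to the OSD.
 */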
static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}

const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= exofs_writepage,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
};

/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}

/*
 * get_block_t - Fill in a buffer_head
 * An OSD takes care of block allocation so we just fake an allocation by
 * putting in the inode's sector_t in the buffer_head.
 * TODO: What about the case of create==0 and @iblock does not exist in the
 * object?
 */
static int exofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

const struct osd_attr g_attr_logical_length = ATTR_DEF(
	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);

static int _do_truncate(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t isize = i_size_read(inode);
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);

	ret = exofs_oi_truncate(oi, (u64)isize);
	EXOFS_DBGMSG("(0x%lx) size=0x%llx\n", inode->i_ino, isize);
	return ret;
}

/*
 * Truncate a file to the specified size - all we have to do is set the size
 * attribute. We make sure the object exists first.
 */
void exofs_truncate(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	int ret;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
	     || S_ISLNK(inode->i_mode)))
		return;
	if (exofs_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	/* if we are about to truncate an object, and it hasn't been
	 * created yet, wait
	 */
	if (unlikely(wait_obj_created(oi)))
		goto fail;

	ret = _do_truncate(inode);
	if (ret)
		goto fail;

out:
	mark_inode_dirty(inode);
	return;
fail:
	make_bad_inode(inode);
	goto out;
}

/*
 * Set inode attributes - just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	error = inode_setattr(inode, iattr);
	return error;
}

/*
 * Read an inode from the OSD, and return it as is. We also return the size
 * attribute in the 'obj_size' argument.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
			   struct exofs_fcb *inode, uint64_t *obj_size)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[2];
	struct exofs_io_state *ios;
	int ret;

	*obj_size = ~0;
	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
		return ret;
	}

	ios->obj.id = exofs_oi_objno(oi);
	exofs_make_credential(oi->i_cred, &ios->obj);
	ios->cred = oi->i_cred;

	attrs[0] = g_attr_inode_data;
	attrs[1] = g_attr_logical_length;
	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = exofs_sbi_read(ios);
	if (ret)
		goto out;

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of logical_length failed\n",
			  __func__);
		goto out;
	}
	*obj_size = get_unaligned_be64(attrs[1].val_ptr);

out:
	exofs_put_io_state(ios);
	return ret;
}

static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
	oi->i_flags = 0;
}

/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	uint64_t obj_size;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb, &obj_size);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	inode->i_nlink = le16_to_cpu(fcb.i_links_count);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	if ((inode->i_size != obj_size) &&
		(!exofs_inode_is_fast_symlink(inode))) {
		EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n",
			  inode->i_size, _LLU(obj_size));
		/* FIXME: call exofs_inode_recovery() */
	}

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}

/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists on
 * the OSD.
 */
static void create_done(struct exofs_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = exofs_check_io(ios, NULL);
	exofs_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid));
		/*TODO: When FS is corrupted creation can fail, the object may
		 * already exist. Get rid of this asynchronous creation; if it
		 * exists, increment the obj counter and try the next object,
		 * until we succeed. All these dangling objects will be made
		 * into lost files by chkfs.exofs
		 */
	}

	set_obj_created(oi);
	atomic_dec(&inode->i_count);
	wake_up(&oi->i_wq);
}

/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct exofs_sb_info *sbi;
	struct exofs_io_state *ios;
	int ret;

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	sbi = sb->s_fs_info;

	sb->s_dirt = 1;
	inode->i_uid = current->cred->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current->cred->fsgid;
	}
	inode->i_mode = mode;

	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	mark_inode_dirty(inode);

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->obj.id = exofs_oi_objno(oi);
	exofs_make_credential(oi->i_cred, &ios->obj);

	/* increment the refcount so that the inode will still be around when we
	 * reach the callback
	 */
	atomic_inc(&inode->i_count);

	ios->done = create_done;
	ios->private = inode;
	ios->cred = oi->i_cred;
	ret = exofs_sbi_create(ios);
	if (ret) {
		atomic_dec(&inode->i_count);
		exofs_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}

/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct exofs_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	exofs_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}

/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct exofs_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = exofs_oi_write(oi, ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	exofs_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("ret=>%d\n", ret);
	return ret;
}

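/* ->write_inode: flush the in-core inode to the OSD, synchronously when
 * 'wait' is set.
 */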
int exofs_write_inode(struct inode *inode, int wait)
{
	return exofs_update_inode(inode, wait);
}

/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct exofs_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	exofs_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_delete_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct exofs_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	mark_inode_dirty(inode);
	exofs_update_inode(inode, inode_needs_sync(inode));

	inode->i_size = 0;
	if (inode->i_blocks)
		exofs_truncate(inode);

	clear_inode(inode);

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
		return;
	}

	/* if we are deleting an obj that hasn't been created yet, wait */
	if (!obj_created(oi)) {
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
	}

	ios->obj.id = exofs_oi_objno(oi);
	ios->done = delete_done;
	ios->private = sbi;
	ios->cred = oi->i_cred;
	ret = exofs_sbi_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: exofs_sbi_remove failed\n", __func__);
		exofs_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	clear_inode(inode);
}