inode.c

/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 * from
 * linux/fs/minix/inode.c
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/slab.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

enum { BIO_MAX_PAGES_KMALLOC =
		(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
	MAX_PAGES_KMALLOC =
		PAGE_SIZE / sizeof(struct page *),
};

unsigned exofs_max_io_pages(struct ore_layout *layout,
			    unsigned expected_pages)
{
	unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

	/* TODO: easily support bio chaining */
	pages = min_t(unsigned, pages,
		      layout->group_width * BIO_MAX_PAGES_KMALLOC);
	return pages;
}
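
/* Editor's note (not from the original source): the two caps above fall
 * out of the allocation scheme. The page-pointer array is kmalloc'ed, so
 * a single I/O is bounded by what fits in one kmalloc'ed array
 * (MAX_PAGES_KMALLOC), and by one kmalloc'ed bio per device in the
 * stripe (group_width * BIO_MAX_PAGES_KMALLOC).
 */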

struct page_collect {
	struct exofs_sb_info *sbi;
	struct inode *inode;
	unsigned expected_pages;
	struct ore_io_state *ios;

	struct page **pages;
	unsigned alloc_pages;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
	bool read_4_write; /* This means two things: that the read is sync
			    * and the pages should not be unlocked.
			    */
};
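
/* Editor's note: a page_collect accumulates contiguous page-cache pages
 * into a single ore_io_state request; read_exec()/write_exec() submit
 * the collected segment and _pcol_reset() readies the struct for the
 * next one.
 */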

static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->read_4_write = false;
}

static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = MAX_PAGES_KMALLOC;
}

static int pcol_try_alloc(struct page_collect *pcol)
{
	unsigned pages;

	/* TODO: easily support bio chaining */
	pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

	for (; pages; pages >>= 1) {
		pcol->pages = kmalloc(pages * sizeof(struct page *),
				      GFP_KERNEL);
		if (likely(pcol->pages)) {
			pcol->alloc_pages = pages;
			return 0;
		}
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}
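
/* Editor's note: the loop above halves the requested array size on each
 * kmalloc failure, so under memory pressure the I/O proceeds with a
 * smaller segment rather than failing outright.
 */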

static void pcol_free(struct page_collect *pcol)
{
	kfree(pcol->pages);
	pcol->pages = NULL;

	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}

static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
		return -ENOMEM;

	pcol->pages[pcol->nr_pages++] = page;
	pcol->length += len;
	return 0;
}

static int update_read_page(struct page *page, int ret)
{
	if (ret == 0) {
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
	} else if (ret == -EFAULT) {
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0; /* recovered error */
		EXOFS_DBGMSG("recovered read error\n");
	} else /* Error */
		SetPageError(page);

	return ret;
}

static void update_write_page(struct page *page, int ret)
{
	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(pcol->ios, &resid);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		      " length=0x%lx nr_pages=%u\n",
		      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		      pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2(" readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (!pcol->read_4_write)
			unlock_page(page);
		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG2("readpages_done END\n");
	return ret;
}

/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}

static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	int i;

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}

static int read_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	if (!pcol->ios) {
		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps,
					   true,
					   pcol->pg_first << PAGE_CACHE_SHIFT,
					   pcol->length, &pcol->ios);

		if (ret)
			return ret;
	}

	ios = pcol->ios;
	ios->pages = pcol->pages;
	ios->nr_pages = pcol->nr_pages;

	if (pcol->read_4_write) {
		ore_read(pcol->ios);
		return __readpages_done(pcol);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;
	ret = ore_read(ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	EXOFS_DBGMSG2("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
		      oi->one_comp.obj.id, _LLU(ios->offset), pcol->length);

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	if (!pcol->read_4_write)
		_unlock_pcol_pages(pcol, ret, READ);

	pcol_free(pcol);

	kfree(pcol_copy);
	return ret;
}
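
/* Editor's note: read_exec() has two completion modes. For read_4_write
 * the read is issued and completed synchronously in-line; otherwise the
 * pcol is copied to a heap pcol_copy whose ownership passes to the
 * readpages_done() callback, and the caller's pcol is reset for the
 * next segment.
 */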

/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous segment
 * and will start a new collection. Eventually caller must submit the last
 * segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		if (!pcol->read_4_write)
			unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
			     "read_4_write=%d index=0x%lx end_index=0x%lx "
			     "splitting\n", inode->i_ino, len,
			     pcol->read_4_write, page->index, end_index);

		return read_exec(pcol);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}

static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol);
}

static int _readpage(struct page *page, bool read_4_write)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	pcol.read_4_write = read_4_write;
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol);
}
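
/* Editor's note: _readpage(page, true) is the synchronous single-page
 * read used by exofs_write_begin() below for read-modify-write of a
 * partially written page; with read_4_write set the page stays locked
 * on return.
 */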

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}

/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(ios, &resid);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		      " length=0x%lx nr_pages=%u\n",
		      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		      pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG2("writepages_done END\n");
}

static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	BUG_ON(pcol->ios);
	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, false,
			       pcol->pg_first << PAGE_CACHE_SHIFT,
			       pcol->length, &pcol->ios);

	if (unlikely(ret))
		goto err;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	ios = pcol->ios;
	ios->pages = pcol_copy->pages;
	ios->nr_pages = pcol_copy->nr_pages;
	ios->done = writepages_done;
	ios->private = pcol_copy;

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: ore_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	EXOFS_DBGMSG2("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
		      pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
		      pcol->length);
	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	pcol_free(pcol);
	kfree(pcol_copy);

	return ret;
}
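
/* Editor's note: unlike read_exec(), the write path never arrives with a
 * pre-allocated ios (hence the BUG_ON above) and always completes
 * asynchronously through writepages_done(); there is no synchronous
 * variant of write_exec() in this file.
 */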

/* writepage_strip is called either directly from writepage() or by the VFS from
 * within write_cache_pages(), to add one more page to be written to storage.
 * It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2(" writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page "
			      "nr_pages=%u total_length=0x%lx\n",
			      pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}

static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		      "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		      mapping->host->i_ino, wbc->range_start, wbc->range_end,
		      mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
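
/* Editor's note: expected_pages is only a sizing hint for the first
 * pcol_try_alloc(); the floor of 32 presumably avoids repeated tiny
 * allocations when the writeback range is small or unknown.
 */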

static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}

/* i_mutex held using inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
	if (to > inode->i_size)
		truncate_pagecache(inode, to, inode->i_size);
}

int exofs_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned len, unsigned flags,
		      struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			goto out;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		loff_t i_size = i_size_read(mapping->host);
		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
		size_t rlen;

		if (page->index < end_index)
			rlen = PAGE_CACHE_SIZE;
		else if (page->index == end_index)
			rlen = i_size & ~PAGE_CACHE_MASK;
		else
			rlen = 0;

		if (!rlen) {
			clear_highpage(page);
			SetPageUptodate(page);
			goto out;
		}

		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("_readpage failed\n");
		}
	}
out:
	if (unlikely(ret))
		_write_failed(mapping->host, pos + len);

	return ret;
}
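
/* Editor's note: a partial-page write into a page that is not yet
 * uptodate must first read the old contents in (the read-modify-write
 * above); _write_failed() undoes any pagecache extension when the
 * prepare step fails.
 */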

static int exofs_write_begin_export(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned flags,
				    struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}

static int exofs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret))
		_write_failed(inode, pos + len);

	/* TODO: once simple_write_end marks inode dirty remove */
	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}

static int exofs_releasepage(struct page *page, gfp_t gfp)
{
	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	WARN_ON(1);
	return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
	WARN_ON(1);
}

const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= exofs_writepage,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
	.releasepage	= exofs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.invalidatepage = exofs_invalidatepage,

	/* Not implemented Yet */
	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
	.direct_IO	= NULL, /* TODO: Should be trivial to do */

	/* With these NULL has special meaning or default is not exported */
	.get_xip_mem	= NULL,
	.migratepage	= NULL,
	.launder_page	= NULL,
	.is_partially_uptodate = NULL,
	.error_remove_page = NULL,
};
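
/* Editor's note: .set_page_dirty is __set_page_dirty_nobuffers because
 * exofs pages carry no buffer_heads; .releasepage/.invalidatepage warn
 * loudly since, by the look of the WARN_ON(1), they are apparently not
 * expected to be called on this mapping.
 */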

/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}

static int _do_truncate(struct inode *inode, loff_t newsize)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ret = ore_truncate(&sbi->layout, &oi->comps, (u64)newsize);
	if (likely(!ret))
		truncate_setsize(inode, newsize);

	EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
		     inode->i_ino, newsize, ret);
	return ret;
}

/*
 * Set inode attributes - update size attribute on OSD if needed,
 * otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* if we are about to modify an object, and it hasn't been
	 * created yet, wait
	 */
	error = wait_obj_created(exofs_i(inode));
	if (unlikely(error))
		return error;

	error = inode_change_ok(inode, iattr);
	if (unlikely(error))
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = _do_truncate(inode, iattr->ia_size);
		if (unlikely(error))
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}

static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_FILE_LAYOUT,
	0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_DIR_LAYOUT,
	0);

/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
			   struct exofs_fcb *inode)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[] = {
		[0] = g_attr_inode_data,
		[1] = g_attr_inode_file_layout,
		[2] = g_attr_inode_dir_layout,
	};
	struct ore_io_state *ios;
	struct exofs_on_disk_inode_layout *layout;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	attrs[1].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);
	attrs[2].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
			  _LLU(oi->one_comp.obj.id), ret);
		memset(inode, 0, sizeof(*inode));
		inode->i_mode = 0040000 | (0777 & ~022);
		/* If object is lost on target we might as well enable its
		 * delete.
		 */
		if ((ret == -ENOENT) || (ret == -EINVAL))
			ret = 0;
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[1].len) {
		layout = attrs[1].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported files layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

	ret = extract_attr_from_ios(ios, &attrs[2]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[2].len) {
		layout = attrs[2].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported meta-data layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

out:
	ore_put_io_state(ios);
	return ret;
}
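
/* Editor's note: on a failed read the fcb is synthesized as an empty
 * directory (0040000 | (0777 & ~022), i.e. mode 0755) and -ENOENT /
 * -EINVAL are swallowed, so an object lost on the target can still be
 * deleted from the namespace.
 */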

static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
	oi->i_flags = 0;
}

/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);
	exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	inode->i_nlink = le16_to_cpu(fcb.i_links_count);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
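
/* Editor's note: iget_locked() either returns a cached inode (no I_NEW,
 * handed back as-is) or a locked half-built one that must be filled
 * from the OSD and then released via unlock_new_inode() on success or
 * iget_failed() on error.
 */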

int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}

/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = ore_check_io(ios, NULL);
	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx\n",
			  _LLU(exofs_oi_objno(oi)),
			  _LLU(oi->one_comp.obj.partition));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
	}

	set_obj_created(oi);

	wake_up(&oi->i_wq);
}

/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct ore_io_state *ios;
	int ret;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	inode_init_owner(inode, dir, mode);
	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));
	exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

	mark_inode_dirty(inode);

	ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->done = create_done;
	ios->private = inode;

	ret = ore_create(ios);
	if (ret) {
		ore_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}
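
/* Editor's note: object creation is asynchronous. The inode is returned
 * right away with obj_2bcreated set; anything that must touch the OSD
 * object first blocks in wait_obj_created() until create_done() flips
 * the flag and wakes the queue. s_curr_pending counts such in-flight
 * OSD requests.
 */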

/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	ore_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}

/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		EXOFS_DBGMSG("Failed kzalloc of args\n");
		return -ENOMEM;
	}

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	wait_obj_created(oi);

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = ore_write(ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	ore_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
		     inode->i_ino, do_sync, ret);
	return ret;
}
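
/* Editor's note: the fcb is embedded in the kmalloc'ed updatei_args so
 * it remains valid for the duration of an async write. In the async
 * case ownership of args passes to updatei_done(); otherwise args is
 * freed here once ore_write() has completed synchronously.
 */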

int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
	return exofs_update_inode(inode, 1);
}

/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	/* TODO: should do better here */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	inode->i_size = 0;
	end_writeback(inode);

	/* if we are deleting an obj that hasn't been created yet, wait.
	 * This also makes sure that create_done cannot be called with an
	 * already evicted inode.
	 */
	wait_obj_created(oi);
	/* ignore the error, attempt a remove anyway */

	/* Now Remove the OSD objects */
	ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
		return;
	}

	ios->done = delete_done;
	ios->private = sbi;

	ret = ore_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: ore_remove failed\n", __func__);
		ore_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	end_writeback(inode);
}