inode.c

/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/slab.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };

unsigned exofs_max_io_pages(struct ore_layout *layout,
			    unsigned expected_pages)
{
	unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

	/* TODO: easily support bio chaining */
	pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
	return pages;
}

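/* A page_collect accumulates a run of contiguous pages destined for a single
 * ore I/O. It is built up by readpage_strip()/writepage_strip() and submitted
 * by read_exec()/write_exec().
 */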
struct page_collect {
	struct exofs_sb_info *sbi;
	struct inode *inode;
	unsigned expected_pages;
	struct ore_io_state *ios;

	struct page **pages;
	unsigned alloc_pages;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
	bool read_4_write; /* This means two things: that the read is sync
			    * And the pages should not be unlocked.
			    */
	struct page *that_locked_page;
};

static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->read_4_write = false;
	pcol->that_locked_page = NULL;
}

static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;
	pcol->that_locked_page = NULL;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = MAX_PAGES_KMALLOC;
}

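/* Allocate the pages array; if kmalloc fails, keep halving the requested
 * count until an allocation succeeds or no sizes are left to try.
 */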
static int pcol_try_alloc(struct page_collect *pcol)
{
	unsigned pages;

	/* TODO: easily support bio chaining */
	pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

	for (; pages; pages >>= 1) {
		pcol->pages = kmalloc(pages * sizeof(struct page *),
				      GFP_KERNEL);
		if (likely(pcol->pages)) {
			pcol->alloc_pages = pages;
			return 0;
		}
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}

static void pcol_free(struct page_collect *pcol)
{
	kfree(pcol->pages);
	pcol->pages = NULL;

	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}

static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
		return -ENOMEM;

	pcol->pages[pcol->nr_pages++] = page;
	pcol->length += len;
	return 0;
}

enum {PAGE_WAS_NOT_IN_IO = 17};
static int update_read_page(struct page *page, int ret)
{
	switch (ret) {
	case 0:
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		break;
	case -EFAULT:
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		EXOFS_DBGMSG("recovered read error\n");
		/* fall through */
	case PAGE_WAS_NOT_IN_IO:
		ret = 0; /* recovered error */
		break;
	default:
		SetPageError(page);
	}
	return ret;
}

static void update_write_page(struct page *page, int ret)
{
	if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
		return; /* don't pass start don't collect $200 */

	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(pcol->ios, NULL);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		      " length=0x%lx nr_pages=%u\n",
		      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		      pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2(" readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (!pcol->read_4_write)
			unlock_page(page);
		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG2("readpages_done END\n");
	return ret;
}

/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}

static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	int i;

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}

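/* The ore layer may trim an I/O whose length or alignment it cannot handle in
 * one submission; move the trimmed-off pages into the next collect so they are
 * issued in a following I/O.
 */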
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
	struct page_collect *pcol_src, struct page_collect *pcol)
{
	/* length was wrong or offset was not page aligned */
	BUG_ON(pcol_src->nr_pages < ios->nr_pages);

	if (pcol_src->nr_pages > ios->nr_pages) {
		struct page **src_page;
		unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
		unsigned long len_less = pcol_src->length - ios->length;
		unsigned i;
		int ret;

		/* This IO was trimmed */
		pcol_src->nr_pages = ios->nr_pages;
		pcol_src->length = ios->length;

		/* Left over pages are passed to the next io */
		pcol->expected_pages += pages_less;
		pcol->nr_pages = pages_less;
		pcol->length = len_less;
		src_page = pcol_src->pages + pcol_src->nr_pages;
		pcol->pg_first = (*src_page)->index;

		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			return ret;

		for (i = 0; i < pages_less; ++i)
			pcol->pages[i] = *src_page++;

		EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
			"pages_less=0x%x expected_pages=0x%x "
			"next_offset=0x%llx next_len=0x%lx\n",
			pcol_src->nr_pages, pages_less, pcol->expected_pages,
			pcol->pg_first * PAGE_SIZE, pcol->length);
	}
	return 0;
}

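/* Submit the pages collected so far as one ore read. Regular reads complete
 * asynchronously through readpages_done(); in the read_4_write case the
 * result is processed immediately via __readpages_done().
 */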
static int read_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	if (!pcol->ios) {
		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
					   pcol->pg_first << PAGE_CACHE_SHIFT,
					   pcol->length, &pcol->ios);

		if (ret)
			return ret;
	}

	ios = pcol->ios;
	ios->pages = pcol->pages;

	if (pcol->read_4_write) {
		ore_read(pcol->ios);
		return __readpages_done(pcol);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_read(ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	return 0;

err:
	if (!pcol->read_4_write)
		_unlock_pcol_pages(pcol, ret, READ);

	pcol_free(pcol);

	kfree(pcol_copy);
	return ret;
}

/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous
 * segment and will start a new collection. Eventually the caller must submit
 * the last segment, if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	pcol->that_locked_page = page;

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		if (!pcol->read_4_write)
			unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
			     "read_4_write=%d index=0x%lx end_index=0x%lx "
			     "splitting\n", inode->i_ino, len,
			     pcol->read_4_write, page->index, end_index);

		return read_exec(pcol);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}

static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	ret = read_exec(&pcol);
	if (unlikely(ret))
		return ret;

	/* _maybe_not_all_in_one_io() may have left trimmed-off pages in pcol;
	 * submit those as well.
	 */
	return read_exec(&pcol);
}

static int _readpage(struct page *page, bool read_4_write)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	pcol.read_4_write = read_4_write;
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol);
}

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}

/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(ios, NULL);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		      " length=0x%lx nr_pages=%u\n",
		      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		      pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG2("writepages_done END\n");
}

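/* r4w ("read-4-write") hooks: the ore layer calls these to obtain page-cache
 * pages it needs while servicing a write, and to release them when done.
 */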
static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
{
	struct page_collect *pcol = priv;
	pgoff_t index = offset / PAGE_SIZE;

	if (!pcol->that_locked_page ||
	    (pcol->that_locked_page->index != index)) {
		struct page *page = find_get_page(pcol->inode->i_mapping, index);

		if (!page) {
			page = find_or_create_page(pcol->inode->i_mapping,
						   index, GFP_NOFS);
			if (unlikely(!page)) {
				EXOFS_DBGMSG("grab_cache_page Failed "
					     "index=0x%llx\n", _LLU(index));
				return NULL;
			}
			unlock_page(page);
		}
		if (PageDirty(page) || PageWriteback(page))
			*uptodate = true;
		else
			*uptodate = PageUptodate(page);
		EXOFS_DBGMSG("index=0x%lx uptodate=%d\n", index, *uptodate);
		return page;
	} else {
		EXOFS_DBGMSG("YES that_locked_page index=0x%lx\n",
			     pcol->that_locked_page->index);
		*uptodate = true;
		return pcol->that_locked_page;
	}
}

static void __r4w_put_page(void *priv, struct page *page)
{
	struct page_collect *pcol = priv;

	if (pcol->that_locked_page != page) {
		EXOFS_DBGMSG("index=0x%lx\n", page->index);
		page_cache_release(page);
		return;
	}
	EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
}

static const struct _ore_r4w_op _r4w_op = {
	.get_page = &__r4w_get_page,
	.put_page = &__r4w_put_page,
};

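/* Submit the collected dirty pages as one asynchronous ore write; completion
 * is handled in writepages_done().
 */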
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	BUG_ON(pcol->ios);
	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
			       pcol->pg_first << PAGE_CACHE_SHIFT,
			       pcol->length, &pcol->ios);
	if (unlikely(ret))
		goto err;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	ios = pcol->ios;
	ios->pages = pcol_copy->pages;
	ios->done = writepages_done;
	ios->r4w = &_r4w_op;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: ore_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	pcol_free(pcol);
	kfree(pcol_copy);

	return ret;
}

/* writepage_strip is called either directly from writepage() or by the VFS
 * from within write_cache_pages(), to add one more page to be written to
 * storage. It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually the caller must submit the last segment, if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2(" writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page "
			      "nr_pages=%u total_length=0x%lx\n",
			      pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}

static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		      "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		      mapping->host->i_ino, wbc->range_start, wbc->range_end,
		      mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (unlikely(ret)) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	ret = write_exec(&pcol);
	if (unlikely(ret))
		return ret;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		return write_exec(&pcol); /* pump the last remainder */
	} else if (pcol.nr_pages) {
		/* not SYNC let the remainder join the next writeout */
		unsigned i;

		for (i = 0; i < pcol.nr_pages; i++) {
			struct page *page = pcol.pages[i];

			end_page_writeback(page);
			set_page_dirty(page);
			unlock_page(page);
		}
	}
	return 0;
}

/*
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
*/

/* i_mutex held using inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
	if (to > inode->i_size)
		truncate_pagecache(inode, to, inode->i_size);
}

int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			goto out;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		loff_t i_size = i_size_read(mapping->host);
		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
		size_t rlen;

		if (page->index < end_index)
			rlen = PAGE_CACHE_SIZE;
		else if (page->index == end_index)
			rlen = i_size & ~PAGE_CACHE_MASK;
		else
			rlen = 0;

		if (!rlen) {
			clear_highpage(page);
			SetPageUptodate(page);
			goto out;
		}

		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("__readpage failed\n");
		}
	}
out:
	if (unlikely(ret))
		_write_failed(mapping->host, pos + len);

	return ret;
}

static int exofs_write_begin_export(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}

static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret))
		_write_failed(inode, pos + len);

	/* TODO: once simple_write_end marks inode dirty remove */
	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}

static int exofs_releasepage(struct page *page, gfp_t gfp)
{
	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	WARN_ON(1);
	return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
	WARN_ON(1);
}

const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= NULL,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
	.releasepage	= exofs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.invalidatepage = exofs_invalidatepage,

	/* Not implemented Yet */
	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
	.direct_IO	= NULL, /* TODO: Should be trivial to do */

	/* With these NULL has special meaning or default is not exported */
	.get_xip_mem	= NULL,
	.migratepage	= NULL,
	.launder_page	= NULL,
	.is_partially_uptodate = NULL,
	.error_remove_page = NULL,
};

/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}

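/* Truncate the object on the OSD first; the page cache and i_size are trimmed
 * only if that succeeds.
 */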
static int _do_truncate(struct inode *inode, loff_t newsize)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
	if (likely(!ret))
		truncate_setsize(inode, newsize);

	EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
		     inode->i_ino, newsize, ret);
	return ret;
}

/*
 * Set inode attributes - update size attribute on OSD if needed,
 * otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* if we are about to modify an object, and it hasn't been
	 * created yet, wait
	 */
	error = wait_obj_created(exofs_i(inode));
	if (unlikely(error))
		return error;

	error = inode_change_ok(inode, iattr);
	if (unlikely(error))
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = _do_truncate(inode, iattr->ia_size);
		if (unlikely(error))
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}

static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_FILE_LAYOUT,
	0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_DIR_LAYOUT,
	0);

/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
			   struct exofs_fcb *inode)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[] = {
		[0] = g_attr_inode_data,
		[1] = g_attr_inode_file_layout,
		[2] = g_attr_inode_dir_layout,
	};
	struct ore_io_state *ios;
	struct exofs_on_disk_inode_layout *layout;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
	attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
			  _LLU(oi->one_comp.obj.id), ret);
		memset(inode, 0, sizeof(*inode));
		inode->i_mode = 0040000 | (0777 & ~022);
		/* If object is lost on target we might as well enable its
		 * delete.
		 */
		if ((ret == -ENOENT) || (ret == -EINVAL))
			ret = 0;
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[1].len) {
		layout = attrs[1].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported files layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

	ret = extract_attr_from_ios(ios, &attrs[2]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[2].len) {
		layout = attrs[2].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported meta-data layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

out:
	ore_put_io_state(ios);
	return ret;
}

static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
	oi->i_flags = 0;
}

/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);
	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	set_nlink(inode, le16_to_cpu(fcb.i_links_count));
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}

/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = ore_check_io(ios, NULL);
	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)),
			  _LLU(oi->one_comp.obj.partition));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
	}

	set_obj_created(oi);

	wake_up(&oi->i_wq);
}

/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct ore_io_state *ios;
	int ret;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	inode_init_owner(inode, dir, mode);
	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));
	exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

	mark_inode_dirty(inode);

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->done = create_done;
	ios->private = inode;

	ret = ore_create(ios);
	if (ret) {
		ore_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}

/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	ore_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}

/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		EXOFS_DBGMSG("Failed kzalloc of args\n");
		return -ENOMEM;
	}

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	wait_obj_created(oi);

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = ore_write(ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	ore_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
		     inode->i_ino, do_sync, ret);
	return ret;
}

int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
	return exofs_update_inode(inode, 1);
}

/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	/* TODO: should do better here */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	inode->i_size = 0;
	clear_inode(inode);

	/* if we are deleting an obj that hasn't been created yet, wait.
	 * This also makes sure that create_done cannot be called with an
	 * already evicted inode.
	 */
	wait_obj_created(oi);
	/* ignore the error, attempt a remove anyway */

	/* Now Remove the OSD objects */
	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
		return;
	}

	ios->done = delete_done;
	ios->private = sbi;

	ret = ore_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: ore_remove failed\n", __func__);
		ore_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	clear_inode(inode);
}