inode.c

/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/slab.h>

#include "exofs.h"

#define EXOFS_DBGMSG2(M...) do {} while (0)

enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };

unsigned exofs_max_io_pages(struct ore_layout *layout,
			    unsigned expected_pages)
{
	unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);

	/* TODO: easily support bio chaining */
	pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
	return pages;
}
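
/* A page_collect accumulates a run of contiguous page-cache pages that will
 * be submitted to the ORE as a single I/O; it is flushed and reset whenever
 * a discontinuity is hit or resources run out.
 */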
struct page_collect {
	struct exofs_sb_info *sbi;
	struct inode *inode;
	unsigned expected_pages;
	struct ore_io_state *ios;

	struct page **pages;
	unsigned alloc_pages;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
	bool read_4_write; /* This means two things: that the read is sync
			    * And the pages should not be unlocked.
			    */
};

static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->read_4_write = false;
}

static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = MAX_PAGES_KMALLOC;
}
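
/* Allocate pcol->pages[]. If the full request cannot be allocated, retry with
 * progressively halved sizes; pages that then don't fit are simply collected
 * into a later I/O by the strip functions below.
 */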
static int pcol_try_alloc(struct page_collect *pcol)
{
	unsigned pages;

	/* TODO: easily support bio chaining */
	pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

	for (; pages; pages >>= 1) {
		pcol->pages = kmalloc(pages * sizeof(struct page *),
				      GFP_KERNEL);
		if (likely(pcol->pages)) {
			pcol->alloc_pages = pages;
			return 0;
		}
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}

static void pcol_free(struct page_collect *pcol)
{
	kfree(pcol->pages);
	pcol->pages = NULL;

	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}

static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
		return -ENOMEM;

	pcol->pages[pcol->nr_pages++] = page;
	pcol->length += len;
	return 0;
}
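
/* Marker value, distinct from 0 and any -errno: passed to update_*_page()
 * for pages that were not part of the I/O that actually completed, so they
 * are not flagged as errors.
 */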
enum {PAGE_WAS_NOT_IN_IO = 17};

static int update_read_page(struct page *page, int ret)
{
	switch (ret) {
	case 0:
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		break;
	case -EFAULT:
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		EXOFS_DBGMSG("recovered read error\n");
		/* fall through */
	case PAGE_WAS_NOT_IN_IO:
		ret = 0; /* recovered error */
		break;
	default:
		SetPageError(page);
	}
	return ret;
}

static void update_write_page(struct page *page, int ret)
{
	if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
		return; /* don't pass start don't collect $200 */

	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}

/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(pcol->ios, &resid);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = pcol->length - resid;
	}
	if (good_bytes > pcol->ios->length)
		good_bytes = pcol->ios->length;

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		      " length=0x%lx nr_pages=%u\n",
		      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		      pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2(" readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (!pcol->read_4_write)
			unlock_page(page);
		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG2("readpages_done END\n");
	return ret;
}

/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}

static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	int i;

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}
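
/* The ORE may trim an I/O that is larger than it can issue in one go. If it
 * did, move the pages that were trimmed off into @pcol so the caller submits
 * them as the next I/O.
 */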
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
	struct page_collect *pcol_src, struct page_collect *pcol)
{
	/* length was wrong or offset was not page aligned */
	BUG_ON(pcol_src->nr_pages < ios->nr_pages);

	if (pcol_src->nr_pages > ios->nr_pages) {
		struct page **src_page;
		unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
		unsigned long len_less = pcol_src->length - ios->length;
		unsigned i;
		int ret;

		/* This IO was trimmed */
		pcol_src->nr_pages = ios->nr_pages;
		pcol_src->length = ios->length;

		/* Left over pages are passed to the next io */
		pcol->expected_pages += pages_less;
		pcol->nr_pages = pages_less;
		pcol->length = len_less;
		src_page = pcol_src->pages + pcol_src->nr_pages;
		pcol->pg_first = (*src_page)->index;

		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			return ret;

		for (i = 0; i < pages_less; ++i)
			pcol->pages[i] = *src_page++;

		EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
			"pages_less=0x%x expected_pages=0x%x "
			"next_offset=0x%llx next_len=0x%lx\n",
			pcol_src->nr_pages, pages_less, pcol->expected_pages,
			pcol->pg_first * PAGE_SIZE, pcol->length);
	}
	return 0;
}
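
/* Submit the currently collected pages as one ORE read. For read_4_write the
 * read is issued and completed inline; otherwise completion is reported
 * asynchronously through readpages_done().
 */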
static int read_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	if (!pcol->ios) {
		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
					   pcol->pg_first << PAGE_CACHE_SHIFT,
					   pcol->length, &pcol->ios);

		if (ret)
			return ret;
	}

	ios = pcol->ios;
	ios->pages = pcol->pages;

	if (pcol->read_4_write) {
		ore_read(pcol->ios);
		return __readpages_done(pcol);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_read(ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	return 0;

err:
	if (!pcol->read_4_write)
		_unlock_pcol_pages(pcol, ret, READ);

	pcol_free(pcol);

	kfree(pcol_copy);
	return ret;
}

/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous segment
 * and will start a new collection. Eventually caller must submit the last
 * segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		if (!pcol->read_4_write)
			unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
			     "read_4_write=%d index=0x%lx end_index=0x%lx "
			     "splitting\n", inode->i_ino, len,
			     pcol->read_4_write, page->index, end_index);

		return read_exec(pcol);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}
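
/* ->readpages: collect the list via read_cache_pages()/readpage_strip() and
 * submit. read_exec() is called twice because _maybe_not_all_in_one_io() may
 * have carried pages left over from a trimmed I/O into pcol; the second call
 * flushes them.
 */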
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	ret = read_exec(&pcol);
	if (unlikely(ret))
		return ret;

	return read_exec(&pcol);
}

static int _readpage(struct page *page, bool read_4_write)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	pcol.read_4_write = read_4_write;
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol);
}

/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}

/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(ios, &resid);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = pcol->length - resid;
	}
	if (good_bytes > pcol->ios->length)
		good_bytes = pcol->ios->length;

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		      " length=0x%lx nr_pages=%u\n",
		      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		      pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG2("writepages_done END\n");
}
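
/* Submit the currently collected pages as one asynchronous ORE write.
 * Ownership of the pages[] array moves to a heap-allocated copy of the
 * pcol, which writepages_done() frees on completion.
 */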
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	BUG_ON(pcol->ios);
	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
			       pcol->pg_first << PAGE_CACHE_SHIFT,
			       pcol->length, &pcol->ios);
	if (unlikely(ret))
		goto err;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	ios = pcol->ios;
	ios->pages = pcol_copy->pages;
	ios->done = writepages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: ore_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	pcol_free(pcol);
	kfree(pcol_copy);

	return ret;
}

/* writepage_strip is called either directly from writepage() or by the VFS from
 * within write_cache_pages(), to add one more page to be written to storage.
 * It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2(" writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page "
			      "nr_pages=%u total_length=0x%lx\n",
			      pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}
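
/* ->writepages: expected_pages is only a hint for the pages[] allocation,
 * derived from the writeback range (minimum 32). After write_cache_pages()
 * the remainder is flushed with a second write_exec() in WB_SYNC_ALL mode;
 * otherwise any still-collected pages are re-dirtied and left for the next
 * writeout.
 */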
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		      "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		      mapping->host->i_ino, wbc->range_start, wbc->range_end,
		      mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (unlikely(ret)) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	ret = write_exec(&pcol);
	if (unlikely(ret))
		return ret;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		return write_exec(&pcol); /* pump the last remainder */
	} else if (pcol.nr_pages) {
		/* not SYNC, let the remainder join the next writeout */
		unsigned i;

		for (i = 0; i < pcol.nr_pages; i++) {
			struct page *page = pcol.pages[i];

			end_page_writeback(page);
			set_page_dirty(page);
			unlock_page(page);
		}
	}
	return 0;
}

static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}

/* i_mutex held using inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
	if (to > inode->i_size)
		truncate_pagecache(inode, to, inode->i_size);
}

int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			goto out;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		loff_t i_size = i_size_read(mapping->host);
		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
		size_t rlen;

		if (page->index < end_index)
			rlen = PAGE_CACHE_SIZE;
		else if (page->index == end_index)
			rlen = i_size & ~PAGE_CACHE_MASK;
		else
			rlen = 0;

		if (!rlen) {
			clear_highpage(page);
			SetPageUptodate(page);
			goto out;
		}

		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("__readpage failed\n");
		}
	}
out:
	if (unlikely(ret))
		_write_failed(mapping->host, pos + len);

	return ret;
}

static int exofs_write_begin_export(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}

static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret))
		_write_failed(inode, pos + len);

	/* TODO: once simple_write_end marks inode dirty remove */
	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}

static int exofs_releasepage(struct page *page, gfp_t gfp)
{
	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	WARN_ON(1);
	return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
	WARN_ON(1);
}

const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= exofs_writepage,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
	.releasepage	= exofs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.invalidatepage = exofs_invalidatepage,

	/* Not implemented Yet */
	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
	.direct_IO	= NULL, /* TODO: Should be trivial to do */

	/* With these NULL has special meaning or default is not exported */
	.get_xip_mem	= NULL,
	.migratepage	= NULL,
	.launder_page	= NULL,
	.is_partially_uptodate = NULL,
	.error_remove_page = NULL,
};

/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}

static int _do_truncate(struct inode *inode, loff_t newsize)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
	if (likely(!ret))
		truncate_setsize(inode, newsize);

	EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
		     inode->i_ino, newsize, ret);
	return ret;
}

/*
 * Set inode attributes - update size attribute on OSD if needed,
 * otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* if we are about to modify an object, and it hasn't been
	 * created yet, wait
	 */
	error = wait_obj_created(exofs_i(inode));
	if (unlikely(error))
		return error;

	error = inode_change_ok(inode, iattr);
	if (unlikely(error))
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = _do_truncate(inode, iattr->ia_size);
		if (unlikely(error))
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}

static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_FILE_LAYOUT,
	0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_DIR_LAYOUT,
	0);

/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
			   struct exofs_fcb *inode)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[] = {
		[0] = g_attr_inode_data,
		[1] = g_attr_inode_file_layout,
		[2] = g_attr_inode_dir_layout,
	};
	struct ore_io_state *ios;
	struct exofs_on_disk_inode_layout *layout;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
	attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
			  _LLU(oi->one_comp.obj.id), ret);
		memset(inode, 0, sizeof(*inode));
		inode->i_mode = 0040000 | (0777 & ~022);
		/* If object is lost on target we might as well enable its
		 * deletion.
		 */
		if ((ret == -ENOENT) || (ret == -EINVAL))
			ret = 0;
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[1].len) {
		layout = attrs[1].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported files layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

	ret = extract_attr_from_ios(ios, &attrs[2]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	if (attrs[2].len) {
		layout = attrs[2].val_ptr;
		if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
			EXOFS_ERR("%s: unsupported meta-data layout %d\n",
				  __func__, layout->gen_func);
			ret = -ENOTSUPP;
			goto out;
		}
	}

out:
	ore_put_io_state(ios);
	return ret;
}

static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
	oi->i_flags = 0;
}

/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);
	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	inode->i_nlink = le16_to_cpu(fcb.i_links_count);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}

/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists on
 * the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = ore_check_io(ios, NULL);
	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)),
			  _LLU(oi->one_comp.obj.partition));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
	}

	set_obj_created(oi);

	wake_up(&oi->i_wq);
}

/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct ore_io_state *ios;
	int ret;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	inode_init_owner(inode, dir, mode);
	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));
	exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

	mark_inode_dirty(inode);

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->done = create_done;
	ios->private = inode;

	ret = ore_create(ios);
	if (ret) {
		ore_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}

/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	ore_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}

/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		EXOFS_DBGMSG("Failed kzalloc of args\n");
		return -ENOMEM;
	}

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	wait_obj_created(oi);

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = ore_write(ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	ore_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
		     inode->i_ino, do_sync, ret);
	return ret;
}

int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
	return exofs_update_inode(inode, 1);
}

/*
 * Callback function from exofs_evict_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}

/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	/* TODO: should do better here */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	inode->i_size = 0;
	end_writeback(inode);

	/* if we are deleting an obj that hasn't been created yet, wait.
	 * This also makes sure that create_done cannot be called with an
	 * already evicted inode.
	 */
	wait_obj_created(oi);
	/* ignore the error, attempt a remove anyway */

	/* Now Remove the OSD objects */
	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
		return;
	}

	ios->done = delete_done;
	ios->private = sbi;

	ret = ore_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: ore_remove failed\n", __func__);
		ore_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	end_writeback(inode);
}