  1. /*
  2. * linux/fs/affs/file.c
  3. *
  4. * (c) 1996 Hans-Joachim Widmaier - Rewritten
  5. *
  6. * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
  7. *
  8. * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
  9. *
  10. * (C) 1991 Linus Torvalds - minix filesystem
  11. *
  12. * affs regular file handling primitives
  13. */
  14. #include "affs.h"
  15. #if PAGE_SIZE < 4096
  16. #error PAGE_SIZE must be at least 4096
  17. #endif
/* extended-block cache helpers (defined below) */
static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
/* file open/release hooks tracking the per-inode open count */
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);
/* File operations for regular AFFS files (FFS and OFS alike). */
const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,			/* sync wrapper over aio_read */
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,		/* sync wrapper over aio_write */
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,		/* bumps i_opencnt */
	.release	= affs_file_release,		/* truncate/free prealloc on last close */
	.fsync		= affs_file_fsync,
	.splice_read	= generic_file_splice_read,
};
/* Inode operations for regular AFFS files. */
const struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};
  40. static int
  41. affs_file_open(struct inode *inode, struct file *filp)
  42. {
  43. pr_debug("AFFS: open(%lu,%d)\n",
  44. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  45. atomic_inc(&AFFS_I(inode)->i_opencnt);
  46. return 0;
  47. }
  48. static int
  49. affs_file_release(struct inode *inode, struct file *filp)
  50. {
  51. pr_debug("AFFS: release(%lu, %d)\n",
  52. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  53. if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
  54. mutex_lock(&inode->i_mutex);
  55. if (inode->i_size != AFFS_I(inode)->mmu_private)
  56. affs_truncate(inode);
  57. affs_free_prealloc(inode);
  58. mutex_unlock(&inode->i_mutex);
  59. }
  60. return 0;
  61. }
/*
 * Grow the extended-block cache of @inode so that it covers linear
 * cache index @lc_idx.  One zeroed page backs both caches: the first
 * half is the linear cache (every 2^i_lc_shift'th extension key), the
 * second half the associative cache of recently used extension keys.
 * Returns 0, -ENOMEM on allocation failure, or -EIO on a bad read.
 */
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		/* first use: allocate the shared cache page */
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		/* NOTE(review): this copies i_ac[] entries although the
		 * comment talks about the linear cache — verify whether
		 * i_lc[] was intended here. */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			/* entry 0 is the file header block itself */
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			/* walk the on-disk extension chain, one block per step */
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
/*
 * Allocate and initialize a new extended (T_LIST) block for @inode and
 * chain it behind the extended block @bh.  @ext is the chain index the
 * new block will occupy.  Returns the new buffer head (referenced) or
 * ERR_PTR(-ENOSPC / -EIO) on failure.
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	/* allocate near the predecessor to keep the chain local on disk */
	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	/* fill in the on-disk header/tail of the new extension block */
	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* hook it into the predecessor's extension pointer */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	/* incremental checksum update: only the extension word changed */
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);
	return new_bh;
}
  151. static inline struct buffer_head *
  152. affs_get_extblock(struct inode *inode, u32 ext)
  153. {
  154. /* inline the simplest case: same extended block as last time */
  155. struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
  156. if (ext == AFFS_I(inode)->i_ext_last)
  157. get_bh(bh);
  158. else
  159. /* we have to do more (not inlined) */
  160. bh = affs_get_extblock_slow(inode, ext);
  161. return bh;
  162. }
/*
 * Slow path of affs_get_extblock(): locate extended block @ext of
 * @inode using the linear and associative caches, walking the on-disk
 * extension chain where the caches fall short, and allocating a fresh
 * extended block when @ext is exactly one past the current chain end.
 * Returns the buffer head with an extra reference or ERR_PTR().
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		/* ext == i_extcnt: append a brand-new extended block */
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
  272. static int
  273. affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
  274. {
  275. struct super_block *sb = inode->i_sb;
  276. struct buffer_head *ext_bh;
  277. u32 ext;
  278. pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
  279. BUG_ON(block > (sector_t)0x7fffffffUL);
  280. if (block >= AFFS_I(inode)->i_blkcnt) {
  281. if (block > AFFS_I(inode)->i_blkcnt || !create)
  282. goto err_big;
  283. } else
  284. create = 0;
  285. //lock cache
  286. affs_lock_ext(inode);
  287. ext = (u32)block / AFFS_SB(sb)->s_hashsize;
  288. block -= ext * AFFS_SB(sb)->s_hashsize;
  289. ext_bh = affs_get_extblock(inode, ext);
  290. if (IS_ERR(ext_bh))
  291. goto err_ext;
  292. map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
  293. if (create) {
  294. u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
  295. if (!blocknr)
  296. goto err_alloc;
  297. set_buffer_new(bh_result);
  298. AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
  299. AFFS_I(inode)->i_blkcnt++;
  300. /* store new block */
  301. if (bh_result->b_blocknr)
  302. affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
  303. AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
  304. AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
  305. affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
  306. bh_result->b_blocknr = blocknr;
  307. if (!block) {
  308. /* insert first block into header block */
  309. u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
  310. if (tmp)
  311. affs_warning(sb, "get_block", "first block already set (%d)", tmp);
  312. AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
  313. affs_adjust_checksum(ext_bh, blocknr - tmp);
  314. }
  315. }
  316. affs_brelse(ext_bh);
  317. //unlock cache
  318. affs_unlock_ext(inode);
  319. return 0;
  320. err_big:
  321. affs_error(inode->i_sb,"get_block","strange block request %d", block);
  322. return -EIO;
  323. err_ext:
  324. // unlock cache
  325. affs_unlock_ext(inode);
  326. return PTR_ERR(ext_bh);
  327. err_alloc:
  328. brelse(ext_bh);
  329. clear_buffer_mapped(bh_result);
  330. bh_result->b_bdev = NULL;
  331. // unlock cache
  332. affs_unlock_ext(inode);
  333. return -ENOSPC;
  334. }
/* writepage for FFS files: standard buffer-based path via affs_get_block. */
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}
/* readpage for FFS files: standard buffer-based path via affs_get_block. */
static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}
  343. static int affs_write_begin(struct file *file, struct address_space *mapping,
  344. loff_t pos, unsigned len, unsigned flags,
  345. struct page **pagep, void **fsdata)
  346. {
  347. int ret;
  348. *pagep = NULL;
  349. ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  350. affs_get_block,
  351. &AFFS_I(mapping->host)->mmu_private);
  352. if (unlikely(ret)) {
  353. loff_t isize = mapping->host->i_size;
  354. if (pos + len > isize)
  355. vmtruncate(mapping->host, isize);
  356. }
  357. return ret;
  358. }
/* bmap: translate a file block to a device block for FIBMAP et al. */
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}
/* Address-space operations for FFS (fast filesystem) files. */
const struct address_space_operations affs_aops = {
	.readpage	= affs_readpage,
	.writepage	= affs_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= affs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= _affs_bmap
};
  371. static inline struct buffer_head *
  372. affs_bread_ino(struct inode *inode, int block, int create)
  373. {
  374. struct buffer_head *bh, tmp_bh;
  375. int err;
  376. tmp_bh.b_state = 0;
  377. err = affs_get_block(inode, block, &tmp_bh, create);
  378. if (!err) {
  379. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  380. if (bh) {
  381. bh->b_state |= tmp_bh.b_state;
  382. return bh;
  383. }
  384. err = -EIO;
  385. }
  386. return ERR_PTR(err);
  387. }
  388. static inline struct buffer_head *
  389. affs_getzeroblk_ino(struct inode *inode, int block)
  390. {
  391. struct buffer_head *bh, tmp_bh;
  392. int err;
  393. tmp_bh.b_state = 0;
  394. err = affs_get_block(inode, block, &tmp_bh, 1);
  395. if (!err) {
  396. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  397. if (bh) {
  398. bh->b_state |= tmp_bh.b_state;
  399. return bh;
  400. }
  401. err = -EIO;
  402. }
  403. return ERR_PTR(err);
  404. }
  405. static inline struct buffer_head *
  406. affs_getemptyblk_ino(struct inode *inode, int block)
  407. {
  408. struct buffer_head *bh, tmp_bh;
  409. int err;
  410. tmp_bh.b_state = 0;
  411. err = affs_get_block(inode, block, &tmp_bh, 1);
  412. if (!err) {
  413. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  414. if (bh) {
  415. bh->b_state |= tmp_bh.b_state;
  416. return bh;
  417. }
  418. err = -EIO;
  419. }
  420. return ERR_PTR(err);
  421. }
  422. static int
  423. affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
  424. {
  425. struct inode *inode = page->mapping->host;
  426. struct super_block *sb = inode->i_sb;
  427. struct buffer_head *bh;
  428. char *data;
  429. u32 bidx, boff, bsize;
  430. u32 tmp;
  431. pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
  432. BUG_ON(from > to || to > PAGE_CACHE_SIZE);
  433. kmap(page);
  434. data = page_address(page);
  435. bsize = AFFS_SB(sb)->s_data_blksize;
  436. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  437. bidx = tmp / bsize;
  438. boff = tmp % bsize;
  439. while (from < to) {
  440. bh = affs_bread_ino(inode, bidx, 0);
  441. if (IS_ERR(bh))
  442. return PTR_ERR(bh);
  443. tmp = min(bsize - boff, to - from);
  444. BUG_ON(from + tmp > to || tmp > bsize);
  445. memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
  446. affs_brelse(bh);
  447. bidx++;
  448. from += tmp;
  449. boff = 0;
  450. }
  451. flush_dcache_page(page);
  452. kunmap(page);
  453. return 0;
  454. }
  455. static int
  456. affs_extent_file_ofs(struct inode *inode, u32 newsize)
  457. {
  458. struct super_block *sb = inode->i_sb;
  459. struct buffer_head *bh, *prev_bh;
  460. u32 bidx, boff;
  461. u32 size, bsize;
  462. u32 tmp;
  463. pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
  464. bsize = AFFS_SB(sb)->s_data_blksize;
  465. bh = NULL;
  466. size = AFFS_I(inode)->mmu_private;
  467. bidx = size / bsize;
  468. boff = size % bsize;
  469. if (boff) {
  470. bh = affs_bread_ino(inode, bidx, 0);
  471. if (IS_ERR(bh))
  472. return PTR_ERR(bh);
  473. tmp = min(bsize - boff, newsize - size);
  474. BUG_ON(boff + tmp > bsize || tmp > bsize);
  475. memset(AFFS_DATA(bh) + boff, 0, tmp);
  476. be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
  477. affs_fix_checksum(sb, bh);
  478. mark_buffer_dirty_inode(bh, inode);
  479. size += tmp;
  480. bidx++;
  481. } else if (bidx) {
  482. bh = affs_bread_ino(inode, bidx - 1, 0);
  483. if (IS_ERR(bh))
  484. return PTR_ERR(bh);
  485. }
  486. while (size < newsize) {
  487. prev_bh = bh;
  488. bh = affs_getzeroblk_ino(inode, bidx);
  489. if (IS_ERR(bh))
  490. goto out;
  491. tmp = min(bsize, newsize - size);
  492. BUG_ON(tmp > bsize);
  493. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  494. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  495. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  496. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  497. affs_fix_checksum(sb, bh);
  498. bh->b_state &= ~(1UL << BH_New);
  499. mark_buffer_dirty_inode(bh, inode);
  500. if (prev_bh) {
  501. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  502. if (tmp)
  503. affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
  504. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  505. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  506. mark_buffer_dirty_inode(prev_bh, inode);
  507. affs_brelse(prev_bh);
  508. }
  509. size += bsize;
  510. bidx++;
  511. }
  512. affs_brelse(bh);
  513. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  514. return 0;
  515. out:
  516. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  517. return PTR_ERR(bh);
  518. }
  519. static int
  520. affs_readpage_ofs(struct file *file, struct page *page)
  521. {
  522. struct inode *inode = page->mapping->host;
  523. u32 to;
  524. int err;
  525. pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
  526. to = PAGE_CACHE_SIZE;
  527. if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
  528. to = inode->i_size & ~PAGE_CACHE_MASK;
  529. memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
  530. }
  531. err = affs_do_readpage_ofs(file, page, 0, to);
  532. if (!err)
  533. SetPageUptodate(page);
  534. unlock_page(page);
  535. return err;
  536. }
  537. static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
  538. loff_t pos, unsigned len, unsigned flags,
  539. struct page **pagep, void **fsdata)
  540. {
  541. struct inode *inode = mapping->host;
  542. struct page *page;
  543. pgoff_t index;
  544. int err = 0;
  545. pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
  546. if (pos > AFFS_I(inode)->mmu_private) {
  547. /* XXX: this probably leaves a too-big i_size in case of
  548. * failure. Should really be updating i_size at write_end time
  549. */
  550. err = affs_extent_file_ofs(inode, pos);
  551. if (err)
  552. return err;
  553. }
  554. index = pos >> PAGE_CACHE_SHIFT;
  555. page = grab_cache_page_write_begin(mapping, index, flags);
  556. if (!page)
  557. return -ENOMEM;
  558. *pagep = page;
  559. if (PageUptodate(page))
  560. return 0;
  561. /* XXX: inefficient but safe in the face of short writes */
  562. err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
  563. if (err) {
  564. unlock_page(page);
  565. page_cache_release(page);
  566. }
  567. return err;
  568. }
/*
 * write_end for OFS files: copy the page contents into the payload of
 * each affected OFS data block, maintaining block headers (sequence,
 * size, next-chain) and checksums by hand.  Returns the number of
 * bytes written or a negative error code.
 */
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_CACHE_SIZE - 1);
	/* NOTE(review): 'from' is a page offset but 'to' is derived from
	 * the absolute 'pos'; for pos >= PAGE_CACHE_SIZE these disagree —
	 * looks like it should be from + len.  Verify before changing. */
	to = pos + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the page should always be uptodate here,
	 * due to write_begin.
	 */
	pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	/* absolute file offset of the first byte to write */
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		/* partial leading block: merge into its existing payload */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		/* keep the preceding block so new blocks can be chained */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	/* full blocks in the middle of the range */
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			/* freshly allocated: initialize the OFS header */
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				/* chain behind the predecessor */
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}

	/* partial trailing block */
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			/* existing block: only grow its recorded size */
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	/* advance i_size/mmu_private if we wrote past the old EOF */
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	unlock_page(page);
	page_cache_release(page);

	return written;

out:
	/* NOTE(review): bh is reassigned to prev_bh *before* PTR_ERR() is
	 * taken, so the error code comes from the (valid) previous buffer,
	 * not the failed allocation — looks wrong; verify intent. */
	bh = prev_bh;
	if (!written)
		written = PTR_ERR(bh);
	goto done;
}
/* Address-space operations for OFS (old filesystem) files; writes go
 * through the manual write_begin/write_end pair since data lives in
 * per-block payloads — no generic writepage is provided. */
const struct address_space_operations affs_aops_ofs = {
	.readpage	= affs_readpage_ofs,
	//.writepage	= affs_writepage_ofs,
	//.sync_page	= affs_sync_page_ofs,
	.write_begin	= affs_write_begin_ofs,
	.write_end	= affs_write_end_ofs
};
  697. /* Free any preallocated blocks. */
  698. void
  699. affs_free_prealloc(struct inode *inode)
  700. {
  701. struct super_block *sb = inode->i_sb;
  702. pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);
  703. while (AFFS_I(inode)->i_pa_cnt) {
  704. AFFS_I(inode)->i_pa_cnt--;
  705. affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
  706. }
  707. }
/* Truncate (or enlarge) a file to the requested size. */
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
		 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

	/* last data block index and the extended block that holds it */
	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		/* enlarging: let the write path instantiate the new blocks
		 * via a zero-length write at the new size */
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		u32 size = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		/* keep slots up to and including the new last block */
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);

	/* free the remaining data block slots of the last extended block */
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (AFFS_SB(sb)->s_flags & SF_OFS) {
			/* OFS: terminate the data-block chain at the new
			 * last block */
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			/* NOTE(review): no mark_buffer_dirty_inode() here
			 * although the buffer was modified — verify whether
			 * the change can be lost before writeback. */
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	/* free all remaining extended blocks and their data blocks */
	while (ext_key) {
		/* NOTE(review): affs_bread() result is not checked for NULL
		 * before AFFS_BLOCK/AFFS_TAIL dereference — an I/O error
		 * here would oops; confirm and consider bailing out. */
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
  817. int affs_file_fsync(struct file *filp, int datasync)
  818. {
  819. struct inode *inode = filp->f_mapping->host;
  820. int ret, err;
  821. ret = write_inode_now(inode, 0);
  822. err = sync_blockdev(inode->i_sb->s_bdev);
  823. if (!ret)
  824. ret = err;
  825. return ret;
  826. }