file.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912
  1. /*
  2. * linux/fs/affs/file.c
  3. *
  4. * (c) 1996 Hans-Joachim Widmaier - Rewritten
  5. *
  6. * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
  7. *
  8. * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
  9. *
  10. * (C) 1991 Linus Torvalds - minix filesystem
  11. *
  12. * affs regular file handling primitives
  13. */
  14. #include "affs.h"
  15. #if PAGE_SIZE < 4096
  16. #error PAGE_SIZE must be at least 4096
  17. #endif
  18. static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
  19. static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
  20. static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
  21. static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
  22. static int affs_file_open(struct inode *inode, struct file *filp);
  23. static int affs_file_release(struct inode *inode, struct file *filp);
/*
 * File operations for regular AFFS files.  Generic VFS helpers do the
 * actual I/O; only open/release are affs-specific, maintaining the
 * per-inode open count used for block preallocation cleanup.
 */
const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= file_fsync,
	.splice_read	= generic_file_splice_read,
};
/* Inode operations for regular AFFS files: truncation and attribute changes. */
const struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};
/*
 * Count opens of this inode so that preallocated blocks can be freed
 * when the last user goes away (see affs_file_release()).
 */
static int
affs_file_open(struct inode *inode, struct file *filp)
{
	/* only count the first open of this struct file.
	 * NOTE(review): testing f_count here looks racy under concurrent
	 * open/dup — confirm i_opencnt updates are serialized elsewhere. */
	if (atomic_read(&filp->f_count) != 1)
		return 0;
	pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt);
	AFFS_I(inode)->i_opencnt++;
	return 0;
}
/*
 * Drop one open reference; on the last release of the inode, return
 * any preallocated-but-unused blocks to the free bitmap.
 */
static int
affs_file_release(struct inode *inode, struct file *filp)
{
	/* only act on the final release of this struct file */
	if (atomic_read(&filp->f_count) != 0)
		return 0;
	pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt);
	AFFS_I(inode)->i_opencnt--;
	/* last opener gone: free the preallocation window */
	if (!AFFS_I(inode)->i_opencnt)
		affs_free_prealloc(inode);
	return 0;
}
/*
 * Grow the per-inode extended-block cache so that linear-cache slot
 * @lc_idx is valid.  The cache page is split in two: the first half is
 * the linear cache i_lc (every 2^i_lc_shift'th extension block key),
 * the second half the associative cache i_ac of recently used
 * (ext, key) pairs.  When the file has more extension blocks than the
 * linear cache can cover, the cache granularity (i_lc_shift) is
 * doubled until it fits, then the missing slots are filled by walking
 * the on-disk extension-block chain.
 *
 * Returns 0 on success, -ENOMEM if no cache page could be allocated,
 * or -EIO on a read error while walking the chain.
 */
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*bh;
	u32			 lc_max;
	int			 i, j, key;

	/* first use: allocate one zeroed page and carve both caches from it */
	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space
		 * NOTE(review): this compacts i_ac, not i_lc — looks odd for
		 * a linear-cache resize; confirm against the on-disk format
		 * docs before changing. */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			/* slot 0 is the file header block itself */
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		/* walk 2^i_lc_shift extension links forward from the
		 * previous cached key */
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
/*
 * Allocate and initialize a new extension block for @inode and link it
 * after @bh (the current last extension block, or the file header).
 * @ext is the index the new block will have; only used for sanity.
 *
 * Returns the new block's buffer head (referenced), or an ERR_PTR on
 * allocation/read failure.
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*new_bh;
	u32			 blocknr, tmp;

	/* allocate near the predecessor for locality */
	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	/* initialize the on-disk extension block header/tail */
	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* chain it after the predecessor; a pre-existing link would mean
	 * the chain is inconsistent */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
/*
 * Return extension block @ext of @inode with an extra reference held.
 * Fast path: the block cached from the previous lookup (i_ext_bh);
 * everything else is handled out of line.
 */
static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		atomic_inc(&bh->b_count);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);
	return bh;
}
/*
 * Slow path of affs_get_extblock(): locate (or, at end of file, create)
 * extension block @ext, using the linear and associative caches to
 * shorten the walk along the singly linked on-disk chain.
 *
 * Returns a referenced buffer head, or an ERR_PTR on failure.
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*bh;
	u32			 ext_key;
	u32			 lc_idx, lc_off, ac_idx;
	u32			 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		/* at most one block may be appended at a time */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	/* extra reference for the caller, on top of the cache's own */
	atomic_inc(&bh->b_count);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
  270. static int
  271. affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
  272. {
  273. struct super_block *sb = inode->i_sb;
  274. struct buffer_head *ext_bh;
  275. u32 ext;
  276. pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
  277. BUG_ON(block > (sector_t)0x7fffffffUL);
  278. if (block >= AFFS_I(inode)->i_blkcnt) {
  279. if (block > AFFS_I(inode)->i_blkcnt || !create)
  280. goto err_big;
  281. } else
  282. create = 0;
  283. //lock cache
  284. affs_lock_ext(inode);
  285. ext = (u32)block / AFFS_SB(sb)->s_hashsize;
  286. block -= ext * AFFS_SB(sb)->s_hashsize;
  287. ext_bh = affs_get_extblock(inode, ext);
  288. if (IS_ERR(ext_bh))
  289. goto err_ext;
  290. map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
  291. if (create) {
  292. u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
  293. if (!blocknr)
  294. goto err_alloc;
  295. set_buffer_new(bh_result);
  296. AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
  297. AFFS_I(inode)->i_blkcnt++;
  298. /* store new block */
  299. if (bh_result->b_blocknr)
  300. affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
  301. AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
  302. AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
  303. affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
  304. bh_result->b_blocknr = blocknr;
  305. if (!block) {
  306. /* insert first block into header block */
  307. u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
  308. if (tmp)
  309. affs_warning(sb, "get_block", "first block already set (%d)", tmp);
  310. AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
  311. affs_adjust_checksum(ext_bh, blocknr - tmp);
  312. }
  313. }
  314. affs_brelse(ext_bh);
  315. //unlock cache
  316. affs_unlock_ext(inode);
  317. return 0;
  318. err_big:
  319. affs_error(inode->i_sb,"get_block","strange block request %d", block);
  320. return -EIO;
  321. err_ext:
  322. // unlock cache
  323. affs_unlock_ext(inode);
  324. return PTR_ERR(ext_bh);
  325. err_alloc:
  326. brelse(ext_bh);
  327. clear_buffer_mapped(bh_result);
  328. bh_result->b_bdev = NULL;
  329. // unlock cache
  330. affs_unlock_ext(inode);
  331. return -ENOSPC;
  332. }
  333. static int affs_writepage(struct page *page, struct writeback_control *wbc)
  334. {
  335. return block_write_full_page(page, affs_get_block, wbc);
  336. }
  337. static int affs_readpage(struct file *file, struct page *page)
  338. {
  339. return block_read_full_page(page, affs_get_block);
  340. }
  341. static int affs_write_begin(struct file *file, struct address_space *mapping,
  342. loff_t pos, unsigned len, unsigned flags,
  343. struct page **pagep, void **fsdata)
  344. {
  345. *pagep = NULL;
  346. return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  347. affs_get_block,
  348. &AFFS_I(mapping->host)->mmu_private);
  349. }
  350. static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
  351. {
  352. return generic_block_bmap(mapping,block,affs_get_block);
  353. }
/* Address-space operations for FFS files (plain data blocks). */
const struct address_space_operations affs_aops = {
	.readpage	= affs_readpage,
	.writepage	= affs_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= affs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= _affs_bmap
};
  362. static inline struct buffer_head *
  363. affs_bread_ino(struct inode *inode, int block, int create)
  364. {
  365. struct buffer_head *bh, tmp_bh;
  366. int err;
  367. tmp_bh.b_state = 0;
  368. err = affs_get_block(inode, block, &tmp_bh, create);
  369. if (!err) {
  370. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  371. if (bh) {
  372. bh->b_state |= tmp_bh.b_state;
  373. return bh;
  374. }
  375. err = -EIO;
  376. }
  377. return ERR_PTR(err);
  378. }
  379. static inline struct buffer_head *
  380. affs_getzeroblk_ino(struct inode *inode, int block)
  381. {
  382. struct buffer_head *bh, tmp_bh;
  383. int err;
  384. tmp_bh.b_state = 0;
  385. err = affs_get_block(inode, block, &tmp_bh, 1);
  386. if (!err) {
  387. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  388. if (bh) {
  389. bh->b_state |= tmp_bh.b_state;
  390. return bh;
  391. }
  392. err = -EIO;
  393. }
  394. return ERR_PTR(err);
  395. }
  396. static inline struct buffer_head *
  397. affs_getemptyblk_ino(struct inode *inode, int block)
  398. {
  399. struct buffer_head *bh, tmp_bh;
  400. int err;
  401. tmp_bh.b_state = 0;
  402. err = affs_get_block(inode, block, &tmp_bh, 1);
  403. if (!err) {
  404. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  405. if (bh) {
  406. bh->b_state |= tmp_bh.b_state;
  407. return bh;
  408. }
  409. err = -EIO;
  410. }
  411. return ERR_PTR(err);
  412. }
  413. static int
  414. affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
  415. {
  416. struct inode *inode = page->mapping->host;
  417. struct super_block *sb = inode->i_sb;
  418. struct buffer_head *bh;
  419. char *data;
  420. u32 bidx, boff, bsize;
  421. u32 tmp;
  422. pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
  423. BUG_ON(from > to || to > PAGE_CACHE_SIZE);
  424. kmap(page);
  425. data = page_address(page);
  426. bsize = AFFS_SB(sb)->s_data_blksize;
  427. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  428. bidx = tmp / bsize;
  429. boff = tmp % bsize;
  430. while (from < to) {
  431. bh = affs_bread_ino(inode, bidx, 0);
  432. if (IS_ERR(bh))
  433. return PTR_ERR(bh);
  434. tmp = min(bsize - boff, to - from);
  435. BUG_ON(from + tmp > to || tmp > bsize);
  436. memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
  437. affs_brelse(bh);
  438. bidx++;
  439. from += tmp;
  440. boff = 0;
  441. }
  442. flush_dcache_page(page);
  443. kunmap(page);
  444. return 0;
  445. }
  446. static int
  447. affs_extent_file_ofs(struct inode *inode, u32 newsize)
  448. {
  449. struct super_block *sb = inode->i_sb;
  450. struct buffer_head *bh, *prev_bh;
  451. u32 bidx, boff;
  452. u32 size, bsize;
  453. u32 tmp;
  454. pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
  455. bsize = AFFS_SB(sb)->s_data_blksize;
  456. bh = NULL;
  457. size = AFFS_I(inode)->mmu_private;
  458. bidx = size / bsize;
  459. boff = size % bsize;
  460. if (boff) {
  461. bh = affs_bread_ino(inode, bidx, 0);
  462. if (IS_ERR(bh))
  463. return PTR_ERR(bh);
  464. tmp = min(bsize - boff, newsize - size);
  465. BUG_ON(boff + tmp > bsize || tmp > bsize);
  466. memset(AFFS_DATA(bh) + boff, 0, tmp);
  467. be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
  468. affs_fix_checksum(sb, bh);
  469. mark_buffer_dirty_inode(bh, inode);
  470. size += tmp;
  471. bidx++;
  472. } else if (bidx) {
  473. bh = affs_bread_ino(inode, bidx - 1, 0);
  474. if (IS_ERR(bh))
  475. return PTR_ERR(bh);
  476. }
  477. while (size < newsize) {
  478. prev_bh = bh;
  479. bh = affs_getzeroblk_ino(inode, bidx);
  480. if (IS_ERR(bh))
  481. goto out;
  482. tmp = min(bsize, newsize - size);
  483. BUG_ON(tmp > bsize);
  484. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  485. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  486. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  487. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  488. affs_fix_checksum(sb, bh);
  489. bh->b_state &= ~(1UL << BH_New);
  490. mark_buffer_dirty_inode(bh, inode);
  491. if (prev_bh) {
  492. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  493. if (tmp)
  494. affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
  495. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  496. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  497. mark_buffer_dirty_inode(prev_bh, inode);
  498. affs_brelse(prev_bh);
  499. }
  500. size += bsize;
  501. bidx++;
  502. }
  503. affs_brelse(bh);
  504. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  505. return 0;
  506. out:
  507. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  508. return PTR_ERR(bh);
  509. }
/*
 * ->readpage for OFS files: read the whole page via
 * affs_do_readpage_ofs(), zeroing the part of the last page that lies
 * beyond i_size.
 */
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
	to = PAGE_CACHE_SIZE;
	/* last page of the file: only part of it holds data */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
		to = inode->i_size & ~PAGE_CACHE_MASK;
		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
	}

	err = affs_do_readpage_ofs(file, page, 0, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}
/*
 * ->write_begin for OFS files: extend the file up to @pos if needed,
 * grab and (if necessary) read the target page so the later copy and
 * write_end see an uptodate page.
 */
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_CACHE_SHIFT;
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
	if (err) {
		unlock_page(page);
		page_cache_release(page);
	}
	return err;
}
  560. static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
  561. loff_t pos, unsigned len, unsigned copied,
  562. struct page *page, void *fsdata)
  563. {
  564. struct inode *inode = mapping->host;
  565. struct super_block *sb = inode->i_sb;
  566. struct buffer_head *bh, *prev_bh;
  567. char *data;
  568. u32 bidx, boff, bsize;
  569. unsigned from, to;
  570. u32 tmp;
  571. int written;
  572. from = pos & (PAGE_CACHE_SIZE - 1);
  573. to = pos + len;
  574. /*
  575. * XXX: not sure if this can handle short copies (len < copied), but
  576. * we don't have to, because the page should always be uptodate here,
  577. * due to write_begin.
  578. */
  579. pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
  580. bsize = AFFS_SB(sb)->s_data_blksize;
  581. data = page_address(page);
  582. bh = NULL;
  583. written = 0;
  584. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  585. bidx = tmp / bsize;
  586. boff = tmp % bsize;
  587. if (boff) {
  588. bh = affs_bread_ino(inode, bidx, 0);
  589. if (IS_ERR(bh))
  590. return PTR_ERR(bh);
  591. tmp = min(bsize - boff, to - from);
  592. BUG_ON(boff + tmp > bsize || tmp > bsize);
  593. memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
  594. be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
  595. affs_fix_checksum(sb, bh);
  596. mark_buffer_dirty_inode(bh, inode);
  597. written += tmp;
  598. from += tmp;
  599. bidx++;
  600. } else if (bidx) {
  601. bh = affs_bread_ino(inode, bidx - 1, 0);
  602. if (IS_ERR(bh))
  603. return PTR_ERR(bh);
  604. }
  605. while (from + bsize <= to) {
  606. prev_bh = bh;
  607. bh = affs_getemptyblk_ino(inode, bidx);
  608. if (IS_ERR(bh))
  609. goto out;
  610. memcpy(AFFS_DATA(bh), data + from, bsize);
  611. if (buffer_new(bh)) {
  612. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  613. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  614. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  615. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
  616. AFFS_DATA_HEAD(bh)->next = 0;
  617. bh->b_state &= ~(1UL << BH_New);
  618. if (prev_bh) {
  619. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  620. if (tmp)
  621. affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
  622. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  623. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  624. mark_buffer_dirty_inode(prev_bh, inode);
  625. }
  626. }
  627. affs_brelse(prev_bh);
  628. affs_fix_checksum(sb, bh);
  629. mark_buffer_dirty_inode(bh, inode);
  630. written += bsize;
  631. from += bsize;
  632. bidx++;
  633. }
  634. if (from < to) {
  635. prev_bh = bh;
  636. bh = affs_bread_ino(inode, bidx, 1);
  637. if (IS_ERR(bh))
  638. goto out;
  639. tmp = min(bsize, to - from);
  640. BUG_ON(tmp > bsize);
  641. memcpy(AFFS_DATA(bh), data + from, tmp);
  642. if (buffer_new(bh)) {
  643. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  644. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  645. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  646. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  647. AFFS_DATA_HEAD(bh)->next = 0;
  648. bh->b_state &= ~(1UL << BH_New);
  649. if (prev_bh) {
  650. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  651. if (tmp)
  652. affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
  653. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  654. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  655. mark_buffer_dirty_inode(prev_bh, inode);
  656. }
  657. } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
  658. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  659. affs_brelse(prev_bh);
  660. affs_fix_checksum(sb, bh);
  661. mark_buffer_dirty_inode(bh, inode);
  662. written += tmp;
  663. from += tmp;
  664. bidx++;
  665. }
  666. SetPageUptodate(page);
  667. done:
  668. affs_brelse(bh);
  669. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  670. if (tmp > inode->i_size)
  671. inode->i_size = AFFS_I(inode)->mmu_private = tmp;
  672. unlock_page(page);
  673. page_cache_release(page);
  674. return written;
  675. out:
  676. bh = prev_bh;
  677. if (!written)
  678. written = PTR_ERR(bh);
  679. goto done;
  680. }
/*
 * Address-space operations for OFS files; no ->writepage, all writes
 * go synchronously through write_begin/write_end.
 */
const struct address_space_operations affs_aops_ofs = {
	.readpage	= affs_readpage_ofs,
	//.writepage	= affs_writepage_ofs,
	//.sync_page	= affs_sync_page_ofs,
	.write_begin	= affs_write_begin_ofs,
	.write_end	= affs_write_end_ofs
};
  688. /* Free any preallocated blocks. */
  689. void
  690. affs_free_prealloc(struct inode *inode)
  691. {
  692. struct super_block *sb = inode->i_sb;
  693. pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);
  694. while (AFFS_I(inode)->i_pa_cnt) {
  695. AFFS_I(inode)->i_pa_cnt--;
  696. affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
  697. }
  698. }
  699. /* Truncate (or enlarge) a file to the requested size. */
  700. void
  701. affs_truncate(struct inode *inode)
  702. {
  703. struct super_block *sb = inode->i_sb;
  704. u32 ext, ext_key;
  705. u32 last_blk, blkcnt, blk;
  706. u32 size;
  707. struct buffer_head *ext_bh;
  708. int i;
  709. pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
  710. (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);
  711. last_blk = 0;
  712. ext = 0;
  713. if (inode->i_size) {
  714. last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
  715. ext = last_blk / AFFS_SB(sb)->s_hashsize;
  716. }
  717. if (inode->i_size > AFFS_I(inode)->mmu_private) {
  718. struct address_space *mapping = inode->i_mapping;
  719. struct page *page;
  720. void *fsdata;
  721. u32 size = inode->i_size;
  722. int res;
  723. res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
  724. if (!res)
  725. res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
  726. mark_inode_dirty(inode);
  727. return;
  728. } else if (inode->i_size == AFFS_I(inode)->mmu_private)
  729. return;
  730. // lock cache
  731. ext_bh = affs_get_extblock(inode, ext);
  732. if (IS_ERR(ext_bh)) {
  733. affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
  734. ext, PTR_ERR(ext_bh));
  735. return;
  736. }
  737. if (AFFS_I(inode)->i_lc) {
  738. /* clear linear cache */
  739. i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
  740. if (AFFS_I(inode)->i_lc_size > i) {
  741. AFFS_I(inode)->i_lc_size = i;
  742. for (; i < AFFS_LC_SIZE; i++)
  743. AFFS_I(inode)->i_lc[i] = 0;
  744. }
  745. /* clear associative cache */
  746. for (i = 0; i < AFFS_AC_SIZE; i++)
  747. if (AFFS_I(inode)->i_ac[i].ext >= ext)
  748. AFFS_I(inode)->i_ac[i].ext = 0;
  749. }
  750. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  751. blkcnt = AFFS_I(inode)->i_blkcnt;
  752. i = 0;
  753. blk = last_blk;
  754. if (inode->i_size) {
  755. i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
  756. blk++;
  757. } else
  758. AFFS_HEAD(ext_bh)->first_data = 0;
  759. size = AFFS_SB(sb)->s_hashsize;
  760. if (size > blkcnt - blk + i)
  761. size = blkcnt - blk + i;
  762. for (; i < size; i++, blk++) {
  763. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  764. AFFS_BLOCK(sb, ext_bh, i) = 0;
  765. }
  766. AFFS_TAIL(sb, ext_bh)->extension = 0;
  767. affs_fix_checksum(sb, ext_bh);
  768. mark_buffer_dirty_inode(ext_bh, inode);
  769. affs_brelse(ext_bh);
  770. if (inode->i_size) {
  771. AFFS_I(inode)->i_blkcnt = last_blk + 1;
  772. AFFS_I(inode)->i_extcnt = ext + 1;
  773. if (AFFS_SB(sb)->s_flags & SF_OFS) {
  774. struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
  775. u32 tmp;
  776. if (IS_ERR(ext_bh)) {
  777. affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
  778. ext, PTR_ERR(ext_bh));
  779. return;
  780. }
  781. tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
  782. AFFS_DATA_HEAD(bh)->next = 0;
  783. affs_adjust_checksum(bh, -tmp);
  784. affs_brelse(bh);
  785. }
  786. } else {
  787. AFFS_I(inode)->i_blkcnt = 0;
  788. AFFS_I(inode)->i_extcnt = 1;
  789. }
  790. AFFS_I(inode)->mmu_private = inode->i_size;
  791. // unlock cache
  792. while (ext_key) {
  793. ext_bh = affs_bread(sb, ext_key);
  794. size = AFFS_SB(sb)->s_hashsize;
  795. if (size > blkcnt - blk)
  796. size = blkcnt - blk;
  797. for (i = 0; i < size; i++, blk++)
  798. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  799. affs_free_block(sb, ext_key);
  800. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  801. affs_brelse(ext_bh);
  802. }
  803. affs_free_prealloc(inode);
  804. }