/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include "affs.h"

#if PAGE_SIZE < 4096
#error PAGE_SIZE must be at least 4096
#endif

static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);

const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= generic_file_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= file_fsync,
	.sendfile	= generic_file_sendfile,
};

struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};

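/*
 * Open/release bookkeeping: i_opencnt counts opens of the inode so that
 * preallocated blocks can be given back once the last user is gone.  The
 * f_count checks presumably skip extra references to the same struct file
 * (e.g. via dup() or fork()), counting each file only once.
 */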
static int
affs_file_open(struct inode *inode, struct file *filp)
{
	if (atomic_read(&filp->f_count) != 1)
		return 0;
	pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt);
	AFFS_I(inode)->i_opencnt++;
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	if (atomic_read(&filp->f_count) != 0)
		return 0;
	pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt);
	AFFS_I(inode)->i_opencnt--;
	if (!AFFS_I(inode)->i_opencnt)
		affs_free_prealloc(inode);
	return 0;
}

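/*
 * Extended blocks of an AFFS file form a singly linked list on disk, so
 * random access would otherwise mean walking the chain from the start.
 * Two in-memory caches (sharing one zeroed page) speed this up: i_lc, a
 * "linear" cache holding every n'th extended block key (n = 1 << i_lc_shift),
 * and i_ac, a small associative cache of recently seen (ext, key) pairs.
 *
 * affs_grow_extcache() extends the linear cache up to index lc_idx,
 * doubling the stride first if the file has outgrown the current one.
 */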
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}

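/*
 * Allocate and initialize a fresh extended block (T_LIST) and chain it
 * behind *bh via the tail extension pointer, keeping the checksums of
 * both blocks consistent and bumping i_extcnt.
 */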
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		atomic_inc(&bh->b_count);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}

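/*
 * Slow path of affs_get_extblock(): walk from the best cached starting
 * point (the current extended block, the linear cache, or the associative
 * cache) up to the requested extended block, allocating it if it lies
 * exactly one past the end of the file.  The result is remembered in
 * i_ext_bh/i_ext_last for the next lookup.
 */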
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	atomic_inc(&bh->b_count);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}

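/*
 * Map a logical file block to a device block for the generic block I/O
 * code.  Each extended block holds s_hashsize data block pointers, so the
 * logical block number splits into an extended block index and an offset
 * within that block.  With create != 0, exactly one new block may be
 * appended at the current end of file.
 */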
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);

	if (block > (sector_t)0x7fffffffUL)
		BUG();

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %d", block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}

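/*
 * Standard (FFS) address space operations: everything is delegated to the
 * generic buffer-based helpers with affs_get_block() as the mapping
 * callback.
 */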
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}

static int affs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, affs_get_block,
				  &AFFS_I(page->mapping->host)->mmu_private);
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
	.readpage	= affs_readpage,
	.writepage	= affs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= affs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= _affs_bmap,
};

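/*
 * Helpers that resolve a logical block via affs_get_block() on a temporary
 * buffer_head and then read (or grab a zeroed/empty) device block for it.
 * Used by the OFS code paths below and by affs_truncate(), which need to
 * touch the data block headers themselves.
 */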
static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

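/*
 * OFS (Old File System) variants: OFS data blocks carry a 24-byte header
 * (type, key, sequence number, size, next pointer, checksum), so pages
 * cannot simply be mapped onto disk blocks and the payload has to be
 * copied by hand.  affs_do_readpage_ofs() copies the byte range
 * [from, to) of a page out of the corresponding data blocks.
 */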
static int
affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	if (from > to || to > PAGE_CACHE_SIZE)
		BUG();
	kmap(page);
	data = page_address(page);
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (from < to) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			/* drop the kmap taken above before bailing out */
			kunmap(page);
			return PTR_ERR(bh);
		}
		tmp = min(bsize - boff, to - from);
		if (from + tmp > to || tmp > bsize)
			BUG();
		memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		from += tmp;
		boff = 0;
	}
	flush_dcache_page(page);
	kunmap(page);
	return 0;
}

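/*
 * Grow an OFS file to newsize by zero-filling the tail of the last
 * partial data block and appending fresh zeroed data blocks, keeping the
 * per-block size fields, next pointers and checksums consistent.
 */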
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		if (boff + tmp > bsize || tmp > bsize)
			BUG();
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		if (tmp > bsize)
			BUG();
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
			if (tmp)
				affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}

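/*
 * readpage for OFS: read the whole page, zeroing the part beyond EOF.
 */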
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
	to = PAGE_CACHE_SIZE;
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
		to = inode->i_size & ~PAGE_CACHE_MASK;
		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
	}

	err = affs_do_readpage_ofs(file, page, 0, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}

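/*
 * prepare_write for OFS: extend the file up to the start of the write if
 * necessary, then make sure the parts of the page outside [from, to)
 * contain valid data, so that commit_write can write back whole blocks.
 */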
static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	u32 size, offset;
	u32 tmp;
	int err = 0;

	pr_debug("AFFS: prepare_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	offset = page->index << PAGE_CACHE_SHIFT;
	if (offset + from > AFFS_I(inode)->mmu_private) {
		err = affs_extent_file_ofs(inode, offset + from);
		if (err)
			return err;
	}
	size = inode->i_size;

	if (PageUptodate(page))
		return 0;

	if (from) {
		err = affs_do_readpage_ofs(file, page, 0, from);
		if (err)
			return err;
	}
	if (to < PAGE_CACHE_SIZE) {
		char *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (size > offset + to) {
			if (size < offset + PAGE_CACHE_SIZE)
				tmp = size & ~PAGE_CACHE_MASK;
			else
				tmp = PAGE_CACHE_SIZE;
			err = affs_do_readpage_ofs(file, page, to, tmp);
		}
	}
	return err;
}

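/*
 * commit_write for OFS: copy [from, to) of the page into the data blocks,
 * initializing the headers of freshly allocated blocks and linking them
 * into the chain, then update i_size/mmu_private if the file grew.
 */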
static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	u32 tmp;
	int written;

	pr_debug("AFFS: commit_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - from);
		if (boff + tmp > bsize || tmp > bsize)
			BUG();
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, to - from);
		if (tmp > bsize)
			BUG();
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;
	return written;

out:
	bh = prev_bh;
	if (!written)
		written = PTR_ERR(bh);
	goto done;
}

const struct address_space_operations affs_aops_ofs = {
	.readpage	= affs_readpage_ofs,
	//.writepage	= affs_writepage_ofs,
	//.sync_page	= affs_sync_page_ofs,
	.prepare_write	= affs_prepare_write_ofs,
	.commit_write	= affs_commit_write_ofs,
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}

/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
		 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		u32 size = inode->i_size - 1;
		int res;

		page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		size = (size & (PAGE_CACHE_SIZE - 1)) + 1;
		res = mapping->a_ops->prepare_write(NULL, page, size, size);
		if (!res)
			res = mapping->a_ops->commit_write(NULL, page, size, size);
		unlock_page(page);
		page_cache_release(page);
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (AFFS_SB(sb)->s_flags & SF_OFS) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			/* check the buffer we just read, not the ext block released above */
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}