file.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920
  1. /*
  2. * linux/fs/affs/file.c
  3. *
  4. * (c) 1996 Hans-Joachim Widmaier - Rewritten
  5. *
  6. * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
  7. *
  8. * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
  9. *
  10. * (C) 1991 Linus Torvalds - minix filesystem
  11. *
  12. * affs regular file handling primitives
  13. */
  14. #include "affs.h"
  15. #if PAGE_SIZE < 4096
  16. #error PAGE_SIZE must be at least 4096
  17. #endif
  18. static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
  19. static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
  20. static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
  21. static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
  22. static ssize_t affs_file_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos);
  23. static int affs_file_open(struct inode *inode, struct file *filp);
  24. static int affs_file_release(struct inode *inode, struct file *filp);
/* File operations for regular AFFS files.  Reads/mmap go through the
 * generic page-cache paths; writes are wrapped to update the timestamps
 * (see affs_file_write below). */
struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= affs_file_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= file_fsync,
	.sendfile	= generic_file_sendfile,
};
/* Inode operations for regular AFFS files: size changes go through
 * affs_truncate, attribute changes through affs_notify_change. */
struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};
  39. static int
  40. affs_file_open(struct inode *inode, struct file *filp)
  41. {
  42. if (atomic_read(&filp->f_count) != 1)
  43. return 0;
  44. pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt);
  45. AFFS_I(inode)->i_opencnt++;
  46. return 0;
  47. }
  48. static int
  49. affs_file_release(struct inode *inode, struct file *filp)
  50. {
  51. if (atomic_read(&filp->f_count) != 0)
  52. return 0;
  53. pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt);
  54. AFFS_I(inode)->i_opencnt--;
  55. if (!AFFS_I(inode)->i_opencnt)
  56. affs_free_prealloc(inode);
  57. return 0;
  58. }
/*
 * Grow the per-inode extended-block cache so that linear-cache index
 * lc_idx is populated.  The cache page is split in two halves: i_lc is
 * a "linear" cache holding every n'th extension-block key (n = 1 <<
 * i_lc_shift), i_ac an associative cache of recently used keys.
 *
 * Returns 0 on success, -ENOMEM if the cache page cannot be allocated,
 * -EIO if an extension block cannot be read.
 *
 * NOTE(review): callers appear to hold the inode's ext lock (see the
 * "lock cache" markers in affs_get_block) — confirm before changing
 * locking here.
 */
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	/* Lazily allocate the shared cache page on first use. */
	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space:
		 * keep every off'th entry, compacting toward the front */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index by chasing extension pointers
	 * on disk, stepping (i_lc_mask + 1) blocks per cache slot */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			/* slot 0 is the file header block itself */
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
/*
 * Allocate and initialize a new extension block for @inode, numbered
 * @ext, and chain it behind the current last extension block @bh.
 *
 * Returns the new (dirty, referenced) buffer_head, or ERR_PTR(-ENOSPC)
 * if no block could be allocated / ERR_PTR(-EIO) if it could not be
 * read zeroed.  On success i_extcnt is bumped and the inode is marked
 * dirty.
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	/* allocate near the previous extension block */
	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	/* fill in the on-disk extension block header/tail */
	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* link the new block behind @bh; warn if a link already existed */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	/* checksum delta: the extension field changed by (blocknr - tmp) */
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
/*
 * Return a referenced buffer_head for extension block @ext of @inode.
 * Fast path: if @ext is the same block as on the last lookup, just take
 * another reference on the cached i_ext_bh; otherwise fall through to
 * affs_get_extblock_slow().  May return an ERR_PTR.
 */
static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		atomic_inc(&bh->b_count);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}
/*
 * Slow path of affs_get_extblock(): locate (or allocate) extension
 * block @ext of @inode and cache it in i_ext_bh/i_ext_last.
 *
 * Lookup strategy, cheapest first: sequential successor of the cached
 * block, the file header (ext == 0), allocation of a brand-new block
 * (ext == i_extcnt), the linear cache, the associative cache, and
 * finally chasing extension pointers on disk from the nearest cached
 * ancestor.  Returns a referenced buffer_head or an ERR_PTR.
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		/* ext == i_extcnt: grow the file by one extension block */
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks in the
	 * associative cache, walking backwards from ext */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache; tmp now holds the block number
	 * whose key ext_key is, so the loop below walks forward to ext */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	/* one reference stays in the cache, one goes to the caller */
	atomic_inc(&bh->b_count);

	return bh;

err_bread:
	/* bh is NULL here; affs_brelse is assumed to tolerate NULL —
	 * NOTE(review): confirm against affs_brelse's definition */
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
  269. static int
  270. affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
  271. {
  272. struct super_block *sb = inode->i_sb;
  273. struct buffer_head *ext_bh;
  274. u32 ext;
  275. pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
  276. if (block > (sector_t)0x7fffffffUL)
  277. BUG();
  278. if (block >= AFFS_I(inode)->i_blkcnt) {
  279. if (block > AFFS_I(inode)->i_blkcnt || !create)
  280. goto err_big;
  281. } else
  282. create = 0;
  283. //lock cache
  284. affs_lock_ext(inode);
  285. ext = (u32)block / AFFS_SB(sb)->s_hashsize;
  286. block -= ext * AFFS_SB(sb)->s_hashsize;
  287. ext_bh = affs_get_extblock(inode, ext);
  288. if (IS_ERR(ext_bh))
  289. goto err_ext;
  290. map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
  291. if (create) {
  292. u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
  293. if (!blocknr)
  294. goto err_alloc;
  295. set_buffer_new(bh_result);
  296. AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
  297. AFFS_I(inode)->i_blkcnt++;
  298. /* store new block */
  299. if (bh_result->b_blocknr)
  300. affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
  301. AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
  302. AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
  303. affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
  304. bh_result->b_blocknr = blocknr;
  305. if (!block) {
  306. /* insert first block into header block */
  307. u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
  308. if (tmp)
  309. affs_warning(sb, "get_block", "first block already set (%d)", tmp);
  310. AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
  311. affs_adjust_checksum(ext_bh, blocknr - tmp);
  312. }
  313. }
  314. affs_brelse(ext_bh);
  315. //unlock cache
  316. affs_unlock_ext(inode);
  317. return 0;
  318. err_big:
  319. affs_error(inode->i_sb,"get_block","strange block request %d", block);
  320. return -EIO;
  321. err_ext:
  322. // unlock cache
  323. affs_unlock_ext(inode);
  324. return PTR_ERR(ext_bh);
  325. err_alloc:
  326. brelse(ext_bh);
  327. clear_buffer_mapped(bh_result);
  328. bh_result->b_bdev = NULL;
  329. // unlock cache
  330. affs_unlock_ext(inode);
  331. return -ENOSPC;
  332. }
/* Write back one page via the generic block layer using affs_get_block. */
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}
/* Read one page via the generic block layer using affs_get_block. */
static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}
/* Prepare a page for writing; cont_prepare_write zero-fills the gap
 * between the old end of file (tracked in mmu_private) and the write. */
static int affs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, affs_get_block,
				  &AFFS_I(page->mapping->host)->mmu_private);
}
/* Map a file block to a device block for the bmap() interface. */
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,affs_get_block);
}
/* Address-space operations for FFS-style (non-OFS) AFFS files, which
 * store plain data blocks and can use the generic block-cache helpers. */
struct address_space_operations affs_aops = {
	.readpage	= affs_readpage,
	.writepage	= affs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= affs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= _affs_bmap
};
  358. static inline struct buffer_head *
  359. affs_bread_ino(struct inode *inode, int block, int create)
  360. {
  361. struct buffer_head *bh, tmp_bh;
  362. int err;
  363. tmp_bh.b_state = 0;
  364. err = affs_get_block(inode, block, &tmp_bh, create);
  365. if (!err) {
  366. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  367. if (bh) {
  368. bh->b_state |= tmp_bh.b_state;
  369. return bh;
  370. }
  371. err = -EIO;
  372. }
  373. return ERR_PTR(err);
  374. }
  375. static inline struct buffer_head *
  376. affs_getzeroblk_ino(struct inode *inode, int block)
  377. {
  378. struct buffer_head *bh, tmp_bh;
  379. int err;
  380. tmp_bh.b_state = 0;
  381. err = affs_get_block(inode, block, &tmp_bh, 1);
  382. if (!err) {
  383. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  384. if (bh) {
  385. bh->b_state |= tmp_bh.b_state;
  386. return bh;
  387. }
  388. err = -EIO;
  389. }
  390. return ERR_PTR(err);
  391. }
  392. static inline struct buffer_head *
  393. affs_getemptyblk_ino(struct inode *inode, int block)
  394. {
  395. struct buffer_head *bh, tmp_bh;
  396. int err;
  397. tmp_bh.b_state = 0;
  398. err = affs_get_block(inode, block, &tmp_bh, 1);
  399. if (!err) {
  400. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  401. if (bh) {
  402. bh->b_state |= tmp_bh.b_state;
  403. return bh;
  404. }
  405. err = -EIO;
  406. }
  407. return ERR_PTR(err);
  408. }
  409. static ssize_t
  410. affs_file_write(struct file *file, const char __user *buf,
  411. size_t count, loff_t *ppos)
  412. {
  413. ssize_t retval;
  414. retval = generic_file_write (file, buf, count, ppos);
  415. if (retval >0) {
  416. struct inode *inode = file->f_dentry->d_inode;
  417. inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
  418. mark_inode_dirty(inode);
  419. }
  420. return retval;
  421. }
  422. static int
  423. affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
  424. {
  425. struct inode *inode = page->mapping->host;
  426. struct super_block *sb = inode->i_sb;
  427. struct buffer_head *bh;
  428. char *data;
  429. u32 bidx, boff, bsize;
  430. u32 tmp;
  431. pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
  432. if (from > to || to > PAGE_CACHE_SIZE)
  433. BUG();
  434. kmap(page);
  435. data = page_address(page);
  436. bsize = AFFS_SB(sb)->s_data_blksize;
  437. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  438. bidx = tmp / bsize;
  439. boff = tmp % bsize;
  440. while (from < to) {
  441. bh = affs_bread_ino(inode, bidx, 0);
  442. if (IS_ERR(bh))
  443. return PTR_ERR(bh);
  444. tmp = min(bsize - boff, to - from);
  445. if (from + tmp > to || tmp > bsize)
  446. BUG();
  447. memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
  448. affs_brelse(bh);
  449. bidx++;
  450. from += tmp;
  451. boff = 0;
  452. }
  453. flush_dcache_page(page);
  454. kunmap(page);
  455. return 0;
  456. }
/*
 * Extend an OFS file from its current allocated size (mmu_private) to
 * @newsize: zero-fill the tail of the current last data block, then
 * allocate and chain zeroed data blocks until @newsize is covered.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the error path, i_size/mmu_private are still set to
 * @newsize even though the extension failed, and the bh held from a
 * previous iteration (prev_bh) is not released — verify intent before
 * relying on this path.
 */
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;

	if (boff) {
		/* partial last block: zero its unused tail first */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		if (boff + tmp > bsize || tmp > bsize)
			BUG();
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		/* keep the previous block around so the new one can be
		 * chained behind it below */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		if (tmp > bsize)
			BUG();
		/* initialize the OFS data-block header */
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			/* chain the new block behind its predecessor */
			u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
			if (tmp)
				affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}
  523. static int
  524. affs_readpage_ofs(struct file *file, struct page *page)
  525. {
  526. struct inode *inode = page->mapping->host;
  527. u32 to;
  528. int err;
  529. pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
  530. to = PAGE_CACHE_SIZE;
  531. if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
  532. to = inode->i_size & ~PAGE_CACHE_MASK;
  533. memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
  534. }
  535. err = affs_do_readpage_ofs(file, page, 0, to);
  536. if (!err)
  537. SetPageUptodate(page);
  538. unlock_page(page);
  539. return err;
  540. }
/*
 * prepare_write for OFS files: extend the file if the write starts past
 * the currently allocated size, then, for a page that is not up to date,
 * read in the bytes around the [from, to) window and zero the tail so
 * commit_write can copy just the written range.  Returns 0 or errno.
 *
 * NOTE(review): offset is a u32, so page->index << PAGE_CACHE_SHIFT
 * truncates above 4 GB — presumably acceptable for OFS; confirm.
 */
static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	u32 size, offset;
	u32 tmp;
	int err = 0;

	pr_debug("AFFS: prepare_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	offset = page->index << PAGE_CACHE_SHIFT;
	if (offset + from > AFFS_I(inode)->mmu_private) {
		/* writing past the allocated size: extend with zeroed blocks */
		err = affs_extent_file_ofs(inode, offset + from);
		if (err)
			return err;
	}
	size = inode->i_size;

	if (PageUptodate(page))
		return 0;

	if (from) {
		/* read everything before the write window */
		err = affs_do_readpage_ofs(file, page, 0, from);
		if (err)
			return err;
	}
	if (to < PAGE_CACHE_SIZE) {
		/* zero the part after the write window ... */
		char *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		/* ... then re-read the tail that is still inside the file */
		if (size > offset + to) {
			if (size < offset + PAGE_CACHE_SIZE)
				tmp = size & ~PAGE_CACHE_MASK;
			else
				tmp = PAGE_CACHE_SIZE;
			err = affs_do_readpage_ofs(file, page, to, tmp);
		}
	}
	return err;
}
/*
 * commit_write for OFS files: copy bytes [from, to) from @page into the
 * per-block OFS data blocks in three phases — the partial block at the
 * start, whole blocks in the middle, and a partial block at the end.
 * Newly allocated blocks get their OFS header initialized and are
 * chained behind their predecessor.  Returns the number of bytes
 * written, or a negative errno if nothing was written.
 */
static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	u32 tmp;
	int written;

	pr_debug("AFFS: commit_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	if (boff) {
		/* phase 1: fill up the partial block at the start */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - from);
		if (boff + tmp > bsize || tmp > bsize)
			BUG();
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		/* keep the previous block so a new one can be chained to it */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	/* phase 2: whole data blocks */
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			/* freshly allocated: initialize header and chain it */
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}

	/* phase 3: the partial block at the end */
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, to - from);
		if (tmp > bsize)
			BUG();
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			/* existing block grew: bump its recorded size */
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	/* advance i_size/mmu_private if the write extended the file */
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	return written;

out:
	/* bh holds the ERR_PTR; report it unless some bytes already made it */
	bh = prev_bh;
	if (!written)
		written = PTR_ERR(bh);
	goto done;
}
/* Address-space operations for OFS-style AFFS files; each data block
 * carries an in-band header, so the generic block helpers cannot be
 * used and writepage/sync_page are (still) unimplemented. */
struct address_space_operations affs_aops_ofs = {
	.readpage	= affs_readpage_ofs,
	//.writepage	= affs_writepage_ofs,
	//.sync_page	= affs_sync_page_ofs,
	.prepare_write	= affs_prepare_write_ofs,
	.commit_write	= affs_commit_write_ofs
};
/* Free any preallocated blocks. */
void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);

	/* i_lastalloc is the last block handed out; the i_pa_cnt blocks
	 * after it were preallocated but never used — return them. */
	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}
  706. /* Truncate (or enlarge) a file to the requested size. */
  707. void
  708. affs_truncate(struct inode *inode)
  709. {
  710. struct super_block *sb = inode->i_sb;
  711. u32 ext, ext_key;
  712. u32 last_blk, blkcnt, blk;
  713. u32 size;
  714. struct buffer_head *ext_bh;
  715. int i;
  716. pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
  717. (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);
  718. last_blk = 0;
  719. ext = 0;
  720. if (inode->i_size) {
  721. last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
  722. ext = last_blk / AFFS_SB(sb)->s_hashsize;
  723. }
  724. if (inode->i_size > AFFS_I(inode)->mmu_private) {
  725. struct address_space *mapping = inode->i_mapping;
  726. struct page *page;
  727. u32 size = inode->i_size - 1;
  728. int res;
  729. page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
  730. if (!page)
  731. return;
  732. size = (size & (PAGE_CACHE_SIZE - 1)) + 1;
  733. res = mapping->a_ops->prepare_write(NULL, page, size, size);
  734. if (!res)
  735. res = mapping->a_ops->commit_write(NULL, page, size, size);
  736. unlock_page(page);
  737. page_cache_release(page);
  738. mark_inode_dirty(inode);
  739. return;
  740. } else if (inode->i_size == AFFS_I(inode)->mmu_private)
  741. return;
  742. // lock cache
  743. ext_bh = affs_get_extblock(inode, ext);
  744. if (IS_ERR(ext_bh)) {
  745. affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
  746. ext, PTR_ERR(ext_bh));
  747. return;
  748. }
  749. if (AFFS_I(inode)->i_lc) {
  750. /* clear linear cache */
  751. i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
  752. if (AFFS_I(inode)->i_lc_size > i) {
  753. AFFS_I(inode)->i_lc_size = i;
  754. for (; i < AFFS_LC_SIZE; i++)
  755. AFFS_I(inode)->i_lc[i] = 0;
  756. }
  757. /* clear associative cache */
  758. for (i = 0; i < AFFS_AC_SIZE; i++)
  759. if (AFFS_I(inode)->i_ac[i].ext >= ext)
  760. AFFS_I(inode)->i_ac[i].ext = 0;
  761. }
  762. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  763. blkcnt = AFFS_I(inode)->i_blkcnt;
  764. i = 0;
  765. blk = last_blk;
  766. if (inode->i_size) {
  767. i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
  768. blk++;
  769. } else
  770. AFFS_HEAD(ext_bh)->first_data = 0;
  771. size = AFFS_SB(sb)->s_hashsize;
  772. if (size > blkcnt - blk + i)
  773. size = blkcnt - blk + i;
  774. for (; i < size; i++, blk++) {
  775. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  776. AFFS_BLOCK(sb, ext_bh, i) = 0;
  777. }
  778. AFFS_TAIL(sb, ext_bh)->extension = 0;
  779. affs_fix_checksum(sb, ext_bh);
  780. mark_buffer_dirty_inode(ext_bh, inode);
  781. affs_brelse(ext_bh);
  782. if (inode->i_size) {
  783. AFFS_I(inode)->i_blkcnt = last_blk + 1;
  784. AFFS_I(inode)->i_extcnt = ext + 1;
  785. if (AFFS_SB(sb)->s_flags & SF_OFS) {
  786. struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
  787. u32 tmp;
  788. if (IS_ERR(ext_bh)) {
  789. affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
  790. ext, PTR_ERR(ext_bh));
  791. return;
  792. }
  793. tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
  794. AFFS_DATA_HEAD(bh)->next = 0;
  795. affs_adjust_checksum(bh, -tmp);
  796. affs_brelse(bh);
  797. }
  798. } else {
  799. AFFS_I(inode)->i_blkcnt = 0;
  800. AFFS_I(inode)->i_extcnt = 1;
  801. }
  802. AFFS_I(inode)->mmu_private = inode->i_size;
  803. // unlock cache
  804. while (ext_key) {
  805. ext_bh = affs_bread(sb, ext_key);
  806. size = AFFS_SB(sb)->s_hashsize;
  807. if (size > blkcnt - blk)
  808. size = blkcnt - blk;
  809. for (i = 0; i < size; i++, blk++)
  810. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  811. affs_free_block(sb, ext_key);
  812. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  813. affs_brelse(ext_bh);
  814. }
  815. affs_free_prealloc(inode);
  816. }