/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */
  14. #include "affs.h"
  15. #if PAGE_SIZE < 4096
  16. #error PAGE_SIZE must be at least 4096
  17. #endif
  18. static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
  19. static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
  20. static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
  21. static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
  22. static int affs_file_open(struct inode *inode, struct file *filp);
  23. static int affs_file_release(struct inode *inode, struct file *filp);
/*
 * File operations for regular AFFS files.  Everything is handled by the
 * generic VFS helpers except open/release, which maintain the per-inode
 * open count used to defer truncation/prealloc cleanup to last release.
 */
const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= file_fsync,
	.splice_read	= generic_file_splice_read,
};
/* Inode operations for regular files: size changes go through
 * affs_truncate(), attribute changes through affs_notify_change(). */
const struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};
  40. static int
  41. affs_file_open(struct inode *inode, struct file *filp)
  42. {
  43. if (atomic_read(&filp->f_count) != 1)
  44. return 0;
  45. pr_debug("AFFS: open(%lu,%d)\n",
  46. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  47. atomic_inc(&AFFS_I(inode)->i_opencnt);
  48. return 0;
  49. }
/*
 * ->release for regular files.  Called on every fput(); only the final
 * release (f_count == 0) is of interest.  When the last opener of the
 * inode goes away, return preallocated blocks and trim the on-disk
 * allocation back to i_size under i_mutex.
 */
static int
affs_file_release(struct inode *inode, struct file *filp)
{
	if (atomic_read(&filp->f_count) != 0)
		return 0;
	pr_debug("AFFS: release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		mutex_lock(&inode->i_mutex);
		/* mmu_private tracks the allocated size; shrink it to i_size */
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}
/*
 * Grow the extension-block cache of @inode so that linear-cache slot
 * @lc_idx is valid.  The cache page is split in two: a linear cache
 * (i_lc, every 2^i_lc_shift'th extension key) and an associative cache
 * (i_ac) of recently used extensions.  If the file has outgrown the
 * linear cache, the stride (i_lc_shift) is doubled until it fits.
 * Returns 0 on success, -ENOMEM/-EIO on failure.
 */
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		/* first use: one zeroed page holds both caches */
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		/* NOTE(review): this compacts i_ac, but the comment and the
		 * surrounding logic suggest the *linear* cache (i_lc) should
		 * be compacted here — verify against upstream history. */
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			/* slot 0 is the file header block itself */
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		/* walk 2^shift extensions forward from the previous slot */
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
/*
 * Allocate and initialize a new extended (T_LIST) block for @inode and
 * chain it behind @bh, the current last extended block.  @ext is the
 * new block's logical extension index (used only for accounting).
 * Returns the new buffer, already dirtied, or ERR_PTR(-ENOSPC/-EIO).
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	/* fill in the on-disk header/tail of the new extension block */
	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* link the new block as successor of the previous extension */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	/* checksum delta: the extension field changed by (blocknr - tmp) */
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
  155. static inline struct buffer_head *
  156. affs_get_extblock(struct inode *inode, u32 ext)
  157. {
  158. /* inline the simplest case: same extended block as last time */
  159. struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
  160. if (ext == AFFS_I(inode)->i_ext_last)
  161. get_bh(bh);
  162. else
  163. /* we have to do more (not inlined) */
  164. bh = affs_get_extblock_slow(inode, ext);
  165. return bh;
  166. }
/*
 * Slow path of affs_get_extblock(): locate (or allocate) extended block
 * number @ext of @inode and return it with an extra reference, updating
 * the cached i_ext_bh/i_ext_last.  Consults the linear cache (i_lc) and
 * the associative cache (i_ac); cache misses are resolved by chain-walking
 * from the nearest cached key.  Returns the buffer or an ERR_PTR.
 * The unlock/lock cache markers flag where the extension lock could be
 * dropped around block reads.
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		/* exactly one past the end: grow the file by one extension */
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	/* bh is NULL here; affs_brelse() tolerates that */
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
  276. static int
  277. affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
  278. {
  279. struct super_block *sb = inode->i_sb;
  280. struct buffer_head *ext_bh;
  281. u32 ext;
  282. pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
  283. BUG_ON(block > (sector_t)0x7fffffffUL);
  284. if (block >= AFFS_I(inode)->i_blkcnt) {
  285. if (block > AFFS_I(inode)->i_blkcnt || !create)
  286. goto err_big;
  287. } else
  288. create = 0;
  289. //lock cache
  290. affs_lock_ext(inode);
  291. ext = (u32)block / AFFS_SB(sb)->s_hashsize;
  292. block -= ext * AFFS_SB(sb)->s_hashsize;
  293. ext_bh = affs_get_extblock(inode, ext);
  294. if (IS_ERR(ext_bh))
  295. goto err_ext;
  296. map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
  297. if (create) {
  298. u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
  299. if (!blocknr)
  300. goto err_alloc;
  301. set_buffer_new(bh_result);
  302. AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
  303. AFFS_I(inode)->i_blkcnt++;
  304. /* store new block */
  305. if (bh_result->b_blocknr)
  306. affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
  307. AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
  308. AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
  309. affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
  310. bh_result->b_blocknr = blocknr;
  311. if (!block) {
  312. /* insert first block into header block */
  313. u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
  314. if (tmp)
  315. affs_warning(sb, "get_block", "first block already set (%d)", tmp);
  316. AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
  317. affs_adjust_checksum(ext_bh, blocknr - tmp);
  318. }
  319. }
  320. affs_brelse(ext_bh);
  321. //unlock cache
  322. affs_unlock_ext(inode);
  323. return 0;
  324. err_big:
  325. affs_error(inode->i_sb,"get_block","strange block request %d", block);
  326. return -EIO;
  327. err_ext:
  328. // unlock cache
  329. affs_unlock_ext(inode);
  330. return PTR_ERR(ext_bh);
  331. err_alloc:
  332. brelse(ext_bh);
  333. clear_buffer_mapped(bh_result);
  334. bh_result->b_bdev = NULL;
  335. // unlock cache
  336. affs_unlock_ext(inode);
  337. return -ENOSPC;
  338. }
/* ->writepage for FFS files: data blocks hold file data only, so the
 * generic block-based writeout path works with affs_get_block(). */
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}
/* ->readpage for FFS files: generic block-mapped read. */
static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}
/* ->write_begin for FFS files: cont_write_begin() zero-fills the gap
 * between the current allocation (tracked in mmu_private) and @pos. */
static int affs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				affs_get_block,
				&AFFS_I(mapping->host)->mmu_private);
}
/* ->bmap: translate a logical file block to a device block number. */
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,affs_get_block);
}
/* Address-space operations for FFS files, whose data blocks contain
 * only file data and can therefore be block-mapped directly. */
const struct address_space_operations affs_aops = {
	.readpage = affs_readpage,
	.writepage = affs_writepage,
	.sync_page = block_sync_page,
	.write_begin = affs_write_begin,
	.write_end = generic_write_end,
	.bmap = _affs_bmap
};
  368. static inline struct buffer_head *
  369. affs_bread_ino(struct inode *inode, int block, int create)
  370. {
  371. struct buffer_head *bh, tmp_bh;
  372. int err;
  373. tmp_bh.b_state = 0;
  374. err = affs_get_block(inode, block, &tmp_bh, create);
  375. if (!err) {
  376. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  377. if (bh) {
  378. bh->b_state |= tmp_bh.b_state;
  379. return bh;
  380. }
  381. err = -EIO;
  382. }
  383. return ERR_PTR(err);
  384. }
  385. static inline struct buffer_head *
  386. affs_getzeroblk_ino(struct inode *inode, int block)
  387. {
  388. struct buffer_head *bh, tmp_bh;
  389. int err;
  390. tmp_bh.b_state = 0;
  391. err = affs_get_block(inode, block, &tmp_bh, 1);
  392. if (!err) {
  393. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  394. if (bh) {
  395. bh->b_state |= tmp_bh.b_state;
  396. return bh;
  397. }
  398. err = -EIO;
  399. }
  400. return ERR_PTR(err);
  401. }
  402. static inline struct buffer_head *
  403. affs_getemptyblk_ino(struct inode *inode, int block)
  404. {
  405. struct buffer_head *bh, tmp_bh;
  406. int err;
  407. tmp_bh.b_state = 0;
  408. err = affs_get_block(inode, block, &tmp_bh, 1);
  409. if (!err) {
  410. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  411. if (bh) {
  412. bh->b_state |= tmp_bh.b_state;
  413. return bh;
  414. }
  415. err = -EIO;
  416. }
  417. return ERR_PTR(err);
  418. }
  419. static int
  420. affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
  421. {
  422. struct inode *inode = page->mapping->host;
  423. struct super_block *sb = inode->i_sb;
  424. struct buffer_head *bh;
  425. char *data;
  426. u32 bidx, boff, bsize;
  427. u32 tmp;
  428. pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
  429. BUG_ON(from > to || to > PAGE_CACHE_SIZE);
  430. kmap(page);
  431. data = page_address(page);
  432. bsize = AFFS_SB(sb)->s_data_blksize;
  433. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  434. bidx = tmp / bsize;
  435. boff = tmp % bsize;
  436. while (from < to) {
  437. bh = affs_bread_ino(inode, bidx, 0);
  438. if (IS_ERR(bh))
  439. return PTR_ERR(bh);
  440. tmp = min(bsize - boff, to - from);
  441. BUG_ON(from + tmp > to || tmp > bsize);
  442. memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
  443. affs_brelse(bh);
  444. bidx++;
  445. from += tmp;
  446. boff = 0;
  447. }
  448. flush_dcache_page(page);
  449. kunmap(page);
  450. return 0;
  451. }
/*
 * Extend an OFS file to @newsize by zero-filling the tail of the current
 * last data block and appending fresh zeroed data blocks, keeping the
 * per-block headers and the next-block chain consistent.
 * Returns 0 on success or a negative errno.
 */
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		/* partially used last block: zero-fill its tail */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		/* need the previous block only to chain the new ones to it */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		/* initialize the OFS data-block header */
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			/* link the previous block to this one */
			u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
			if (tmp)
				affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	/* NOTE(review): on failure this records the full newsize even though
	 * only `size` bytes were actually extended — verify intent. */
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}
/*
 * ->readpage for OFS files: zero the part of the page beyond EOF, then
 * copy the data-block payloads in via affs_do_readpage_ofs().
 */
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
	to = PAGE_CACHE_SIZE;
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
		/* last page: only read up to EOF, zero the remainder */
		to = inode->i_size & ~PAGE_CACHE_MASK;
		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
	}

	err = affs_do_readpage_ofs(file, page, 0, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}
/*
 * ->write_begin for OFS files: extend the file first if the write starts
 * beyond the current allocation, then grab and (if necessary) fully read
 * the target page so write_end can copy into it safely.
 * On success *pagep holds a locked page; on error the page is released.
 */
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_CACHE_SHIFT;
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
	if (err) {
		unlock_page(page);
		page_cache_release(page);
	}
	return err;
}
  566. static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
  567. loff_t pos, unsigned len, unsigned copied,
  568. struct page *page, void *fsdata)
  569. {
  570. struct inode *inode = mapping->host;
  571. struct super_block *sb = inode->i_sb;
  572. struct buffer_head *bh, *prev_bh;
  573. char *data;
  574. u32 bidx, boff, bsize;
  575. unsigned from, to;
  576. u32 tmp;
  577. int written;
  578. from = pos & (PAGE_CACHE_SIZE - 1);
  579. to = pos + len;
  580. /*
  581. * XXX: not sure if this can handle short copies (len < copied), but
  582. * we don't have to, because the page should always be uptodate here,
  583. * due to write_begin.
  584. */
  585. pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
  586. bsize = AFFS_SB(sb)->s_data_blksize;
  587. data = page_address(page);
  588. bh = NULL;
  589. written = 0;
  590. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  591. bidx = tmp / bsize;
  592. boff = tmp % bsize;
  593. if (boff) {
  594. bh = affs_bread_ino(inode, bidx, 0);
  595. if (IS_ERR(bh))
  596. return PTR_ERR(bh);
  597. tmp = min(bsize - boff, to - from);
  598. BUG_ON(boff + tmp > bsize || tmp > bsize);
  599. memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
  600. be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
  601. affs_fix_checksum(sb, bh);
  602. mark_buffer_dirty_inode(bh, inode);
  603. written += tmp;
  604. from += tmp;
  605. bidx++;
  606. } else if (bidx) {
  607. bh = affs_bread_ino(inode, bidx - 1, 0);
  608. if (IS_ERR(bh))
  609. return PTR_ERR(bh);
  610. }
  611. while (from + bsize <= to) {
  612. prev_bh = bh;
  613. bh = affs_getemptyblk_ino(inode, bidx);
  614. if (IS_ERR(bh))
  615. goto out;
  616. memcpy(AFFS_DATA(bh), data + from, bsize);
  617. if (buffer_new(bh)) {
  618. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  619. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  620. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  621. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
  622. AFFS_DATA_HEAD(bh)->next = 0;
  623. bh->b_state &= ~(1UL << BH_New);
  624. if (prev_bh) {
  625. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  626. if (tmp)
  627. affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
  628. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  629. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  630. mark_buffer_dirty_inode(prev_bh, inode);
  631. }
  632. }
  633. affs_brelse(prev_bh);
  634. affs_fix_checksum(sb, bh);
  635. mark_buffer_dirty_inode(bh, inode);
  636. written += bsize;
  637. from += bsize;
  638. bidx++;
  639. }
  640. if (from < to) {
  641. prev_bh = bh;
  642. bh = affs_bread_ino(inode, bidx, 1);
  643. if (IS_ERR(bh))
  644. goto out;
  645. tmp = min(bsize, to - from);
  646. BUG_ON(tmp > bsize);
  647. memcpy(AFFS_DATA(bh), data + from, tmp);
  648. if (buffer_new(bh)) {
  649. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  650. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  651. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  652. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  653. AFFS_DATA_HEAD(bh)->next = 0;
  654. bh->b_state &= ~(1UL << BH_New);
  655. if (prev_bh) {
  656. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  657. if (tmp)
  658. affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
  659. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  660. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  661. mark_buffer_dirty_inode(prev_bh, inode);
  662. }
  663. } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
  664. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  665. affs_brelse(prev_bh);
  666. affs_fix_checksum(sb, bh);
  667. mark_buffer_dirty_inode(bh, inode);
  668. written += tmp;
  669. from += tmp;
  670. bidx++;
  671. }
  672. SetPageUptodate(page);
  673. done:
  674. affs_brelse(bh);
  675. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  676. if (tmp > inode->i_size)
  677. inode->i_size = AFFS_I(inode)->mmu_private = tmp;
  678. unlock_page(page);
  679. page_cache_release(page);
  680. return written;
  681. out:
  682. bh = prev_bh;
  683. if (!written)
  684. written = PTR_ERR(bh);
  685. goto done;
  686. }
/* Address-space operations for OFS (old file system) files; writeback
 * of dirty pages (writepage/sync_page) is not implemented here. */
const struct address_space_operations affs_aops_ofs = {
	.readpage = affs_readpage_ofs,
	//.writepage = affs_writepage_ofs,
	//.sync_page = affs_sync_page_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs
};
  694. /* Free any preallocated blocks. */
  695. void
  696. affs_free_prealloc(struct inode *inode)
  697. {
  698. struct super_block *sb = inode->i_sb;
  699. pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);
  700. while (AFFS_I(inode)->i_pa_cnt) {
  701. AFFS_I(inode)->i_pa_cnt--;
  702. affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
  703. }
  704. }
  705. /* Truncate (or enlarge) a file to the requested size. */
  706. void
  707. affs_truncate(struct inode *inode)
  708. {
  709. struct super_block *sb = inode->i_sb;
  710. u32 ext, ext_key;
  711. u32 last_blk, blkcnt, blk;
  712. u32 size;
  713. struct buffer_head *ext_bh;
  714. int i;
  715. pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
  716. (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);
  717. last_blk = 0;
  718. ext = 0;
  719. if (inode->i_size) {
  720. last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
  721. ext = last_blk / AFFS_SB(sb)->s_hashsize;
  722. }
  723. if (inode->i_size > AFFS_I(inode)->mmu_private) {
  724. struct address_space *mapping = inode->i_mapping;
  725. struct page *page;
  726. void *fsdata;
  727. u32 size = inode->i_size;
  728. int res;
  729. res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
  730. if (!res)
  731. res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
  732. else
  733. inode->i_size = AFFS_I(inode)->mmu_private;
  734. mark_inode_dirty(inode);
  735. return;
  736. } else if (inode->i_size == AFFS_I(inode)->mmu_private)
  737. return;
  738. // lock cache
  739. ext_bh = affs_get_extblock(inode, ext);
  740. if (IS_ERR(ext_bh)) {
  741. affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
  742. ext, PTR_ERR(ext_bh));
  743. return;
  744. }
  745. if (AFFS_I(inode)->i_lc) {
  746. /* clear linear cache */
  747. i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
  748. if (AFFS_I(inode)->i_lc_size > i) {
  749. AFFS_I(inode)->i_lc_size = i;
  750. for (; i < AFFS_LC_SIZE; i++)
  751. AFFS_I(inode)->i_lc[i] = 0;
  752. }
  753. /* clear associative cache */
  754. for (i = 0; i < AFFS_AC_SIZE; i++)
  755. if (AFFS_I(inode)->i_ac[i].ext >= ext)
  756. AFFS_I(inode)->i_ac[i].ext = 0;
  757. }
  758. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  759. blkcnt = AFFS_I(inode)->i_blkcnt;
  760. i = 0;
  761. blk = last_blk;
  762. if (inode->i_size) {
  763. i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
  764. blk++;
  765. } else
  766. AFFS_HEAD(ext_bh)->first_data = 0;
  767. AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
  768. size = AFFS_SB(sb)->s_hashsize;
  769. if (size > blkcnt - blk + i)
  770. size = blkcnt - blk + i;
  771. for (; i < size; i++, blk++) {
  772. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  773. AFFS_BLOCK(sb, ext_bh, i) = 0;
  774. }
  775. AFFS_TAIL(sb, ext_bh)->extension = 0;
  776. affs_fix_checksum(sb, ext_bh);
  777. mark_buffer_dirty_inode(ext_bh, inode);
  778. affs_brelse(ext_bh);
  779. if (inode->i_size) {
  780. AFFS_I(inode)->i_blkcnt = last_blk + 1;
  781. AFFS_I(inode)->i_extcnt = ext + 1;
  782. if (AFFS_SB(sb)->s_flags & SF_OFS) {
  783. struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
  784. u32 tmp;
  785. if (IS_ERR(ext_bh)) {
  786. affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
  787. ext, PTR_ERR(ext_bh));
  788. return;
  789. }
  790. tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
  791. AFFS_DATA_HEAD(bh)->next = 0;
  792. affs_adjust_checksum(bh, -tmp);
  793. affs_brelse(bh);
  794. }
  795. } else {
  796. AFFS_I(inode)->i_blkcnt = 0;
  797. AFFS_I(inode)->i_extcnt = 1;
  798. }
  799. AFFS_I(inode)->mmu_private = inode->i_size;
  800. // unlock cache
  801. while (ext_key) {
  802. ext_bh = affs_bread(sb, ext_key);
  803. size = AFFS_SB(sb)->s_hashsize;
  804. if (size > blkcnt - blk)
  805. size = blkcnt - blk;
  806. for (i = 0; i < size; i++, blk++)
  807. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  808. affs_free_block(sb, ext_key);
  809. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  810. affs_brelse(ext_bh);
  811. }
  812. affs_free_prealloc(inode);
  813. }