file.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929
  1. /*
  2. * linux/fs/affs/file.c
  3. *
  4. * (c) 1996 Hans-Joachim Widmaier - Rewritten
  5. *
  6. * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
  7. *
  8. * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
  9. *
  10. * (C) 1991 Linus Torvalds - minix filesystem
  11. *
  12. * affs regular file handling primitives
  13. */
  14. #include "affs.h"
  15. #if PAGE_SIZE < 4096
  16. #error PAGE_SIZE must be at least 4096
  17. #endif
  18. static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
  19. static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
  20. static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
  21. static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
  22. static int affs_file_open(struct inode *inode, struct file *filp);
  23. static int affs_file_release(struct inode *inode, struct file *filp);
/*
 * File operations for AFFS regular files.  Reads, writes, seeks and mmap
 * all go through the generic VFS helpers (data mapping is provided by
 * affs_get_block via the address_space ops); only open/release are
 * AFFS-specific, maintaining the per-inode open count used to decide
 * when to drop preallocated blocks and truncate excess allocation.
 */
const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= affs_file_fsync,
	.splice_read	= generic_file_splice_read,
};
/*
 * Inode operations for AFFS regular files: size changes are handled by
 * affs_truncate, attribute changes by affs_notify_change.
 */
const struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};
  40. static int
  41. affs_file_open(struct inode *inode, struct file *filp)
  42. {
  43. pr_debug("AFFS: open(%lu,%d)\n",
  44. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  45. atomic_inc(&AFFS_I(inode)->i_opencnt);
  46. return 0;
  47. }
  48. static int
  49. affs_file_release(struct inode *inode, struct file *filp)
  50. {
  51. pr_debug("AFFS: release(%lu, %d)\n",
  52. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  53. if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
  54. mutex_lock(&inode->i_mutex);
  55. if (inode->i_size != AFFS_I(inode)->mmu_private)
  56. affs_truncate(inode);
  57. affs_free_prealloc(inode);
  58. mutex_unlock(&inode->i_mutex);
  59. }
  60. return 0;
  61. }
/*
 * Grow the per-inode extended-block cache so that linear-cache slot
 * lc_idx becomes valid.
 *
 * The cache page holds two halves: a "linear" cache i_lc (every n'th
 * extended block key, n = 1 << i_lc_shift) and an "associative" cache
 * i_ac of recently used (ext, key) pairs.  When the file has more
 * extended blocks than the linear cache can address, the cache is
 * re-scaled: the shift grows and existing entries are compacted.
 *
 * Returns 0 on success, -ENOMEM if the cache page cannot be allocated,
 * -EIO if an extended block cannot be read while walking the chain.
 * Caller is expected to hold the extent lock (see the lock/unlock
 * markers around the affs_bread chain walk).
 */
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*bh;
	u32 lc_max;
	int i, j, key;

	/* First use: allocate one zeroed page and split it between the
	 * linear cache (first half) and the associative cache (second half). */
	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space
		 * (keep every off'th entry; NOTE(review): compaction loop
		 * writes i_ac but reads i_ac[j] — presumably intended to
		 * compact i_lc; verify against upstream history) */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			/* slot 0 is the file header block itself */
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		/* walk 2^lc_shift extension links forward from the previous
		 * cached key to find the key for this slot */
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
/*
 * Allocate a fresh extended (T_LIST) block for @inode and chain it after
 * @bh (the current last extended block) via the on-disk ->extension link.
 *
 * The new block is initialized with its type, key, stype and parent
 * fields, checksummed and marked dirty.  The predecessor's extension
 * pointer and checksum are updated incrementally (affs_adjust_checksum).
 *
 * Returns the new buffer_head (referenced) or ERR_PTR(-ENOSPC/-EIO).
 * Increments i_extcnt and dirties the inode on success.
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	/* allocate near the current extended block for locality */
	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* link the new block after its predecessor */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
  151. static inline struct buffer_head *
  152. affs_get_extblock(struct inode *inode, u32 ext)
  153. {
  154. /* inline the simplest case: same extended block as last time */
  155. struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
  156. if (ext == AFFS_I(inode)->i_ext_last)
  157. get_bh(bh);
  158. else
  159. /* we have to do more (not inlined) */
  160. bh = affs_get_extblock_slow(inode, ext);
  161. return bh;
  162. }
/*
 * Slow path of affs_get_extblock(): locate (or allocate) the ext'th
 * extended block of @inode and return it with a reference, updating the
 * one-entry cache (i_ext_last / i_ext_bh) on the way out.
 *
 * Lookup strategy, cheapest first:
 *   1. sequential access (ext == last + 1): follow the on-disk
 *      ->extension link of the cached block, allocating a new extended
 *      block if we are appending;
 *   2. ext == 0: the file header block (inode number) itself;
 *   3. appending past the end: allocate after the previous block;
 *   4. linear cache hit (every 2^i_lc_shift'th key is cached);
 *   5. associative cache of recently used (ext, key) pairs;
 *   6. otherwise, start from the nearest cached ancestor and walk the
 *      extension chain forward block by block.
 *
 * Returns ERR_PTR(-EIO) on read failure; may also propagate errors from
 * affs_alloc_extblock / affs_grow_extcache.  Caller holds the ext lock
 * (see the lock/unlock comments around the affs_bread calls).
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		/* appending: chain a new extended block after the cached one */
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
  272. static int
  273. affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
  274. {
  275. struct super_block *sb = inode->i_sb;
  276. struct buffer_head *ext_bh;
  277. u32 ext;
  278. pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
  279. BUG_ON(block > (sector_t)0x7fffffffUL);
  280. if (block >= AFFS_I(inode)->i_blkcnt) {
  281. if (block > AFFS_I(inode)->i_blkcnt || !create)
  282. goto err_big;
  283. } else
  284. create = 0;
  285. //lock cache
  286. affs_lock_ext(inode);
  287. ext = (u32)block / AFFS_SB(sb)->s_hashsize;
  288. block -= ext * AFFS_SB(sb)->s_hashsize;
  289. ext_bh = affs_get_extblock(inode, ext);
  290. if (IS_ERR(ext_bh))
  291. goto err_ext;
  292. map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
  293. if (create) {
  294. u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
  295. if (!blocknr)
  296. goto err_alloc;
  297. set_buffer_new(bh_result);
  298. AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
  299. AFFS_I(inode)->i_blkcnt++;
  300. /* store new block */
  301. if (bh_result->b_blocknr)
  302. affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
  303. AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
  304. AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
  305. affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
  306. bh_result->b_blocknr = blocknr;
  307. if (!block) {
  308. /* insert first block into header block */
  309. u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
  310. if (tmp)
  311. affs_warning(sb, "get_block", "first block already set (%d)", tmp);
  312. AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
  313. affs_adjust_checksum(ext_bh, blocknr - tmp);
  314. }
  315. }
  316. affs_brelse(ext_bh);
  317. //unlock cache
  318. affs_unlock_ext(inode);
  319. return 0;
  320. err_big:
  321. affs_error(inode->i_sb,"get_block","strange block request %d", block);
  322. return -EIO;
  323. err_ext:
  324. // unlock cache
  325. affs_unlock_ext(inode);
  326. return PTR_ERR(ext_bh);
  327. err_alloc:
  328. brelse(ext_bh);
  329. clear_buffer_mapped(bh_result);
  330. bh_result->b_bdev = NULL;
  331. // unlock cache
  332. affs_unlock_ext(inode);
  333. return -ENOSPC;
  334. }
  335. static int affs_writepage(struct page *page, struct writeback_control *wbc)
  336. {
  337. return block_write_full_page(page, affs_get_block, wbc);
  338. }
  339. static int affs_readpage(struct file *file, struct page *page)
  340. {
  341. return block_read_full_page(page, affs_get_block);
  342. }
  343. static int affs_write_begin(struct file *file, struct address_space *mapping,
  344. loff_t pos, unsigned len, unsigned flags,
  345. struct page **pagep, void **fsdata)
  346. {
  347. *pagep = NULL;
  348. return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  349. affs_get_block,
  350. &AFFS_I(mapping->host)->mmu_private);
  351. }
  352. static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
  353. {
  354. return generic_block_bmap(mapping,block,affs_get_block);
  355. }
/* Address-space operations for the FFS (fast filesystem) variant:
 * data blocks carry no header, so the generic buffer-head helpers work
 * directly on top of affs_get_block. */
const struct address_space_operations affs_aops = {
	.readpage	= affs_readpage,
	.writepage	= affs_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= affs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= _affs_bmap
};
  364. static inline struct buffer_head *
  365. affs_bread_ino(struct inode *inode, int block, int create)
  366. {
  367. struct buffer_head *bh, tmp_bh;
  368. int err;
  369. tmp_bh.b_state = 0;
  370. err = affs_get_block(inode, block, &tmp_bh, create);
  371. if (!err) {
  372. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  373. if (bh) {
  374. bh->b_state |= tmp_bh.b_state;
  375. return bh;
  376. }
  377. err = -EIO;
  378. }
  379. return ERR_PTR(err);
  380. }
  381. static inline struct buffer_head *
  382. affs_getzeroblk_ino(struct inode *inode, int block)
  383. {
  384. struct buffer_head *bh, tmp_bh;
  385. int err;
  386. tmp_bh.b_state = 0;
  387. err = affs_get_block(inode, block, &tmp_bh, 1);
  388. if (!err) {
  389. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  390. if (bh) {
  391. bh->b_state |= tmp_bh.b_state;
  392. return bh;
  393. }
  394. err = -EIO;
  395. }
  396. return ERR_PTR(err);
  397. }
  398. static inline struct buffer_head *
  399. affs_getemptyblk_ino(struct inode *inode, int block)
  400. {
  401. struct buffer_head *bh, tmp_bh;
  402. int err;
  403. tmp_bh.b_state = 0;
  404. err = affs_get_block(inode, block, &tmp_bh, 1);
  405. if (!err) {
  406. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  407. if (bh) {
  408. bh->b_state |= tmp_bh.b_state;
  409. return bh;
  410. }
  411. err = -EIO;
  412. }
  413. return ERR_PTR(err);
  414. }
  415. static int
  416. affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
  417. {
  418. struct inode *inode = page->mapping->host;
  419. struct super_block *sb = inode->i_sb;
  420. struct buffer_head *bh;
  421. char *data;
  422. u32 bidx, boff, bsize;
  423. u32 tmp;
  424. pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
  425. BUG_ON(from > to || to > PAGE_CACHE_SIZE);
  426. kmap(page);
  427. data = page_address(page);
  428. bsize = AFFS_SB(sb)->s_data_blksize;
  429. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  430. bidx = tmp / bsize;
  431. boff = tmp % bsize;
  432. while (from < to) {
  433. bh = affs_bread_ino(inode, bidx, 0);
  434. if (IS_ERR(bh))
  435. return PTR_ERR(bh);
  436. tmp = min(bsize - boff, to - from);
  437. BUG_ON(from + tmp > to || tmp > bsize);
  438. memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
  439. affs_brelse(bh);
  440. bidx++;
  441. from += tmp;
  442. boff = 0;
  443. }
  444. flush_dcache_page(page);
  445. kunmap(page);
  446. return 0;
  447. }
  448. static int
  449. affs_extent_file_ofs(struct inode *inode, u32 newsize)
  450. {
  451. struct super_block *sb = inode->i_sb;
  452. struct buffer_head *bh, *prev_bh;
  453. u32 bidx, boff;
  454. u32 size, bsize;
  455. u32 tmp;
  456. pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
  457. bsize = AFFS_SB(sb)->s_data_blksize;
  458. bh = NULL;
  459. size = AFFS_I(inode)->mmu_private;
  460. bidx = size / bsize;
  461. boff = size % bsize;
  462. if (boff) {
  463. bh = affs_bread_ino(inode, bidx, 0);
  464. if (IS_ERR(bh))
  465. return PTR_ERR(bh);
  466. tmp = min(bsize - boff, newsize - size);
  467. BUG_ON(boff + tmp > bsize || tmp > bsize);
  468. memset(AFFS_DATA(bh) + boff, 0, tmp);
  469. be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
  470. affs_fix_checksum(sb, bh);
  471. mark_buffer_dirty_inode(bh, inode);
  472. size += tmp;
  473. bidx++;
  474. } else if (bidx) {
  475. bh = affs_bread_ino(inode, bidx - 1, 0);
  476. if (IS_ERR(bh))
  477. return PTR_ERR(bh);
  478. }
  479. while (size < newsize) {
  480. prev_bh = bh;
  481. bh = affs_getzeroblk_ino(inode, bidx);
  482. if (IS_ERR(bh))
  483. goto out;
  484. tmp = min(bsize, newsize - size);
  485. BUG_ON(tmp > bsize);
  486. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  487. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  488. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  489. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  490. affs_fix_checksum(sb, bh);
  491. bh->b_state &= ~(1UL << BH_New);
  492. mark_buffer_dirty_inode(bh, inode);
  493. if (prev_bh) {
  494. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  495. if (tmp)
  496. affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
  497. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  498. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  499. mark_buffer_dirty_inode(prev_bh, inode);
  500. affs_brelse(prev_bh);
  501. }
  502. size += bsize;
  503. bidx++;
  504. }
  505. affs_brelse(bh);
  506. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  507. return 0;
  508. out:
  509. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  510. return PTR_ERR(bh);
  511. }
/*
 * readpage for the OFS variant: copy the page's worth of data out of the
 * headered OFS data blocks, zeroing the part of the last page that lies
 * beyond EOF, then mark the page up to date.
 */
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
	to = PAGE_CACHE_SIZE;
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
		/* last page: read only up to EOF and clear the remainder
		 * (~PAGE_CACHE_MASK == PAGE_CACHE_SIZE - 1, i.e. i_size mod page) */
		to = inode->i_size & ~PAGE_CACHE_MASK;
		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
	}

	err = affs_do_readpage_ofs(file, page, 0, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}
/*
 * write_begin for the OFS variant: extend the file with zeroed blocks up
 * to the write position if needed, grab the target page, and bring it
 * fully up to date (a whole-page read) so write_end can copy out any
 * sub-range safely.
 *
 * Returns 0 with *pagep set and the page locked, or a negative error.
 */
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}
	index = pos >> PAGE_CACHE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
	if (err) {
		unlock_page(page);
		page_cache_release(page);
	}
	return err;
}
  562. static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
  563. loff_t pos, unsigned len, unsigned copied,
  564. struct page *page, void *fsdata)
  565. {
  566. struct inode *inode = mapping->host;
  567. struct super_block *sb = inode->i_sb;
  568. struct buffer_head *bh, *prev_bh;
  569. char *data;
  570. u32 bidx, boff, bsize;
  571. unsigned from, to;
  572. u32 tmp;
  573. int written;
  574. from = pos & (PAGE_CACHE_SIZE - 1);
  575. to = pos + len;
  576. /*
  577. * XXX: not sure if this can handle short copies (len < copied), but
  578. * we don't have to, because the page should always be uptodate here,
  579. * due to write_begin.
  580. */
  581. pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
  582. bsize = AFFS_SB(sb)->s_data_blksize;
  583. data = page_address(page);
  584. bh = NULL;
  585. written = 0;
  586. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  587. bidx = tmp / bsize;
  588. boff = tmp % bsize;
  589. if (boff) {
  590. bh = affs_bread_ino(inode, bidx, 0);
  591. if (IS_ERR(bh))
  592. return PTR_ERR(bh);
  593. tmp = min(bsize - boff, to - from);
  594. BUG_ON(boff + tmp > bsize || tmp > bsize);
  595. memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
  596. be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
  597. affs_fix_checksum(sb, bh);
  598. mark_buffer_dirty_inode(bh, inode);
  599. written += tmp;
  600. from += tmp;
  601. bidx++;
  602. } else if (bidx) {
  603. bh = affs_bread_ino(inode, bidx - 1, 0);
  604. if (IS_ERR(bh))
  605. return PTR_ERR(bh);
  606. }
  607. while (from + bsize <= to) {
  608. prev_bh = bh;
  609. bh = affs_getemptyblk_ino(inode, bidx);
  610. if (IS_ERR(bh))
  611. goto out;
  612. memcpy(AFFS_DATA(bh), data + from, bsize);
  613. if (buffer_new(bh)) {
  614. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  615. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  616. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  617. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
  618. AFFS_DATA_HEAD(bh)->next = 0;
  619. bh->b_state &= ~(1UL << BH_New);
  620. if (prev_bh) {
  621. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  622. if (tmp)
  623. affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
  624. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  625. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  626. mark_buffer_dirty_inode(prev_bh, inode);
  627. }
  628. }
  629. affs_brelse(prev_bh);
  630. affs_fix_checksum(sb, bh);
  631. mark_buffer_dirty_inode(bh, inode);
  632. written += bsize;
  633. from += bsize;
  634. bidx++;
  635. }
  636. if (from < to) {
  637. prev_bh = bh;
  638. bh = affs_bread_ino(inode, bidx, 1);
  639. if (IS_ERR(bh))
  640. goto out;
  641. tmp = min(bsize, to - from);
  642. BUG_ON(tmp > bsize);
  643. memcpy(AFFS_DATA(bh), data + from, tmp);
  644. if (buffer_new(bh)) {
  645. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  646. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  647. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
  648. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  649. AFFS_DATA_HEAD(bh)->next = 0;
  650. bh->b_state &= ~(1UL << BH_New);
  651. if (prev_bh) {
  652. u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  653. if (tmp)
  654. affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
  655. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  656. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
  657. mark_buffer_dirty_inode(prev_bh, inode);
  658. }
  659. } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
  660. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  661. affs_brelse(prev_bh);
  662. affs_fix_checksum(sb, bh);
  663. mark_buffer_dirty_inode(bh, inode);
  664. written += tmp;
  665. from += tmp;
  666. bidx++;
  667. }
  668. SetPageUptodate(page);
  669. done:
  670. affs_brelse(bh);
  671. tmp = (page->index << PAGE_CACHE_SHIFT) + from;
  672. if (tmp > inode->i_size)
  673. inode->i_size = AFFS_I(inode)->mmu_private = tmp;
  674. unlock_page(page);
  675. page_cache_release(page);
  676. return written;
  677. out:
  678. bh = prev_bh;
  679. if (!written)
  680. written = PTR_ERR(bh);
  681. goto done;
  682. }
/* Address-space operations for the OFS (old filesystem) variant:
 * data blocks carry per-block headers, so the generic buffer-head
 * read/write paths cannot be used and writepage is not implemented. */
const struct address_space_operations affs_aops_ofs = {
	.readpage	= affs_readpage_ofs,
	//.writepage	= affs_writepage_ofs,
	//.sync_page	= affs_sync_page_ofs,
	.write_begin	= affs_write_begin_ofs,
	.write_end	= affs_write_end_ofs
};
  690. /* Free any preallocated blocks. */
  691. void
  692. affs_free_prealloc(struct inode *inode)
  693. {
  694. struct super_block *sb = inode->i_sb;
  695. pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);
  696. while (AFFS_I(inode)->i_pa_cnt) {
  697. AFFS_I(inode)->i_pa_cnt--;
  698. affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
  699. }
  700. }
  701. /* Truncate (or enlarge) a file to the requested size. */
  702. void
  703. affs_truncate(struct inode *inode)
  704. {
  705. struct super_block *sb = inode->i_sb;
  706. u32 ext, ext_key;
  707. u32 last_blk, blkcnt, blk;
  708. u32 size;
  709. struct buffer_head *ext_bh;
  710. int i;
  711. pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
  712. (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);
  713. last_blk = 0;
  714. ext = 0;
  715. if (inode->i_size) {
  716. last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
  717. ext = last_blk / AFFS_SB(sb)->s_hashsize;
  718. }
  719. if (inode->i_size > AFFS_I(inode)->mmu_private) {
  720. struct address_space *mapping = inode->i_mapping;
  721. struct page *page;
  722. void *fsdata;
  723. u32 size = inode->i_size;
  724. int res;
  725. res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
  726. if (!res)
  727. res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
  728. else
  729. inode->i_size = AFFS_I(inode)->mmu_private;
  730. mark_inode_dirty(inode);
  731. return;
  732. } else if (inode->i_size == AFFS_I(inode)->mmu_private)
  733. return;
  734. // lock cache
  735. ext_bh = affs_get_extblock(inode, ext);
  736. if (IS_ERR(ext_bh)) {
  737. affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
  738. ext, PTR_ERR(ext_bh));
  739. return;
  740. }
  741. if (AFFS_I(inode)->i_lc) {
  742. /* clear linear cache */
  743. i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
  744. if (AFFS_I(inode)->i_lc_size > i) {
  745. AFFS_I(inode)->i_lc_size = i;
  746. for (; i < AFFS_LC_SIZE; i++)
  747. AFFS_I(inode)->i_lc[i] = 0;
  748. }
  749. /* clear associative cache */
  750. for (i = 0; i < AFFS_AC_SIZE; i++)
  751. if (AFFS_I(inode)->i_ac[i].ext >= ext)
  752. AFFS_I(inode)->i_ac[i].ext = 0;
  753. }
  754. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  755. blkcnt = AFFS_I(inode)->i_blkcnt;
  756. i = 0;
  757. blk = last_blk;
  758. if (inode->i_size) {
  759. i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
  760. blk++;
  761. } else
  762. AFFS_HEAD(ext_bh)->first_data = 0;
  763. AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
  764. size = AFFS_SB(sb)->s_hashsize;
  765. if (size > blkcnt - blk + i)
  766. size = blkcnt - blk + i;
  767. for (; i < size; i++, blk++) {
  768. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  769. AFFS_BLOCK(sb, ext_bh, i) = 0;
  770. }
  771. AFFS_TAIL(sb, ext_bh)->extension = 0;
  772. affs_fix_checksum(sb, ext_bh);
  773. mark_buffer_dirty_inode(ext_bh, inode);
  774. affs_brelse(ext_bh);
  775. if (inode->i_size) {
  776. AFFS_I(inode)->i_blkcnt = last_blk + 1;
  777. AFFS_I(inode)->i_extcnt = ext + 1;
  778. if (AFFS_SB(sb)->s_flags & SF_OFS) {
  779. struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
  780. u32 tmp;
  781. if (IS_ERR(ext_bh)) {
  782. affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
  783. ext, PTR_ERR(ext_bh));
  784. return;
  785. }
  786. tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
  787. AFFS_DATA_HEAD(bh)->next = 0;
  788. affs_adjust_checksum(bh, -tmp);
  789. affs_brelse(bh);
  790. }
  791. } else {
  792. AFFS_I(inode)->i_blkcnt = 0;
  793. AFFS_I(inode)->i_extcnt = 1;
  794. }
  795. AFFS_I(inode)->mmu_private = inode->i_size;
  796. // unlock cache
  797. while (ext_key) {
  798. ext_bh = affs_bread(sb, ext_key);
  799. size = AFFS_SB(sb)->s_hashsize;
  800. if (size > blkcnt - blk)
  801. size = blkcnt - blk;
  802. for (i = 0; i < size; i++, blk++)
  803. affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
  804. affs_free_block(sb, ext_key);
  805. ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
  806. affs_brelse(ext_bh);
  807. }
  808. affs_free_prealloc(inode);
  809. }
  810. int affs_file_fsync(struct file *filp, struct dentry *dentry, int datasync)
  811. {
  812. struct inode * inode = dentry->d_inode;
  813. int ret, err;
  814. ret = write_inode_now(inode, 0);
  815. err = sync_blockdev(inode->i_sb->s_bdev);
  816. if (!ret)
  817. ret = err;
  818. return ret;
  819. }