  1. /*
  2. * inode.c
  3. *
  4. * PURPOSE
  5. * Inode handling routines for the OSTA-UDF(tm) filesystem.
  6. *
  7. * COPYRIGHT
  8. * This file is distributed under the terms of the GNU General Public
  9. * License (GPL). Copies of the GPL can be obtained from:
  10. * ftp://prep.ai.mit.edu/pub/gnu/GPL
  11. * Each contributing author retains all rights to their own work.
  12. *
  13. * (C) 1998 Dave Boynton
  14. * (C) 1998-2004 Ben Fennema
  15. * (C) 1999-2000 Stelias Computing Inc
  16. *
  17. * HISTORY
  18. *
  19. * 10/04/98 dgb Added rudimentary directory functions
  20. * 10/07/98 Fully working udf_block_map! It works!
  21. * 11/25/98 bmap altered to better support extents
  22. * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
  23. * 12/12/98 rewrote udf_block_map to handle next extents and descs across
  24. * block boundaries (which is not actually allowed)
  25. * 12/20/98 added support for strategy 4096
  26. * 03/07/99 rewrote udf_block_map (again)
  27. * New funcs, inode_bmap, udf_next_aext
  28. * 04/19/99 Support for writing device EA's for major/minor #
  29. */
  30. #include "udfdecl.h"
  31. #include <linux/mm.h>
  32. #include <linux/smp_lock.h>
  33. #include <linux/module.h>
  34. #include <linux/pagemap.h>
  35. #include <linux/buffer_head.h>
  36. #include <linux/writeback.h>
  37. #include <linux/slab.h>
  38. #include "udf_i.h"
  39. #include "udf_sb.h"
  40. MODULE_AUTHOR("Ben Fennema");
  41. MODULE_DESCRIPTION("Universal Disk Format Filesystem");
  42. MODULE_LICENSE("GPL");
/* Maximum number of extents handled at once when splitting/merging
 * an extent around a newly allocated block (see inode_getblk()). */
#define EXTENT_MERGE_SIZE 5

/* Forward declarations for the static helpers defined later in this file. */
static mode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
					long *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
			      kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
			      kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
				 kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
			      kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
			       kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
			       struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
  62. /*
  63. * udf_delete_inode
  64. *
  65. * PURPOSE
  66. * Clean-up before the specified inode is destroyed.
  67. *
  68. * DESCRIPTION
  69. * This routine is called when the kernel destroys an inode structure
  70. * ie. when iput() finds i_count == 0.
  71. *
  72. * HISTORY
  73. * July 1, 1997 - Andrew E. Mileski
  74. * Written, tested, and released.
  75. *
  76. * Called at the last iput() if i_nlink is zero.
  77. */
  78. void udf_delete_inode(struct inode *inode)
  79. {
  80. truncate_inode_pages(&inode->i_data, 0);
  81. if (is_bad_inode(inode))
  82. goto no_delete;
  83. inode->i_size = 0;
  84. udf_truncate(inode);
  85. lock_kernel();
  86. udf_update_inode(inode, IS_SYNC(inode));
  87. udf_free_inode(inode);
  88. unlock_kernel();
  89. return;
  90. no_delete:
  91. clear_inode(inode);
  92. }
  93. /*
  94. * If we are going to release inode from memory, we discard preallocation and
  95. * truncate last inode extent to proper length. We could use drop_inode() but
  96. * it's called under inode_lock and thus we cannot mark inode dirty there. We
  97. * use clear_inode() but we have to make sure to write inode as it's not written
  98. * automatically.
  99. */
  100. void udf_clear_inode(struct inode *inode)
  101. {
  102. if (!(inode->i_sb->s_flags & MS_RDONLY)) {
  103. lock_kernel();
  104. /* Discard preallocation for directories, symlinks, etc. */
  105. udf_discard_prealloc(inode);
  106. udf_truncate_tail_extent(inode);
  107. unlock_kernel();
  108. write_inode_now(inode, 1);
  109. }
  110. kfree(UDF_I_DATA(inode));
  111. UDF_I_DATA(inode) = NULL;
  112. }
/* Write one page of file data back to disk via the generic block helper;
 * block mapping/allocation is delegated to udf_get_block(). */
static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}
/* Read one page of file data using the generic block helper. */
static int udf_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, udf_get_block);
}
/* Prepare a partial-page write: map (and allocate) the blocks backing
 * the [from, to) range of the page. */
static int udf_prepare_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, udf_get_block);
}
/* FIBMAP support: translate a logical file block to a device block. */
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, udf_get_block);
}
/* Address-space operations for regular UDF files; every data path
 * ultimately goes through udf_get_block() for mapping/allocation. */
const struct address_space_operations udf_aops = {
	.readpage = udf_readpage,
	.writepage = udf_writepage,
	.sync_page = block_sync_page,
	.prepare_write = udf_prepare_write,
	.commit_write = generic_commit_write,
	.bmap = udf_bmap,
};
  138. void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
  139. {
  140. struct page *page;
  141. char *kaddr;
  142. struct writeback_control udf_wbc = {
  143. .sync_mode = WB_SYNC_NONE,
  144. .nr_to_write = 1,
  145. };
  146. /* from now on we have normal address_space methods */
  147. inode->i_data.a_ops = &udf_aops;
  148. if (!UDF_I_LENALLOC(inode)) {
  149. if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
  150. UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
  151. else
  152. UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
  153. mark_inode_dirty(inode);
  154. return;
  155. }
  156. page = grab_cache_page(inode->i_mapping, 0);
  157. BUG_ON(!PageLocked(page));
  158. if (!PageUptodate(page)) {
  159. kaddr = kmap(page);
  160. memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
  161. PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
  162. memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
  163. UDF_I_LENALLOC(inode));
  164. flush_dcache_page(page);
  165. SetPageUptodate(page);
  166. kunmap(page);
  167. }
  168. memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
  169. UDF_I_LENALLOC(inode));
  170. UDF_I_LENALLOC(inode) = 0;
  171. if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
  172. UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
  173. else
  174. UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
  175. inode->i_data.a_ops->writepage(page, &udf_wbc);
  176. page_cache_release(page);
  177. mark_inode_dirty(inode);
  178. }
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored inside its ICB into one backed by a real
 * data block: allocate a block, copy every fileIdentDesc into it, then
 * record the block as the directory's single extent.
 *
 * @inode: directory inode to convert
 * @block: out - logical block number allocated for the directory data
 * @err:   out - error code from the block allocator
 *
 * Returns the buffer_head of the new directory block, or NULL on
 * failure (empty dir, allocation failure, or unreadable entries).
 */
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
					   int *err)
{
	int newblock;
	struct buffer_head *dbh = NULL;
	kernel_lb_addr eloc;
	uint32_t elen;
	uint8_t alloctype;
	struct extent_position epos;
	struct udf_fileident_bh sfibh, dfibh;
	/* Positions are kept in 4-byte units, hence the >> 2 */
	loff_t f_pos = udf_ext0_offset(inode) >> 2;
	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
	struct fileIdentDesc cfi, *sfi, *dfi;

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	if (!inode->i_size) {
		/* Empty directory - nothing to copy, just flip the type */
		UDF_I_ALLOCTYPE(inode) = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
			       UDF_I_LOCATION(inode).partitionReferenceNum,
			       UDF_I_LOCATION(inode).logicalBlockNum, err);
	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
				  UDF_I_LOCATION(inode).partitionReferenceNum,
				  0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	/* Walk the in-ICB entries and re-emit them into the new block.
	 * The alloc type is temporarily switched back to IN_ICB so
	 * udf_fileident_read() parses the source correctly. */
	sfibh.soffset = sfibh.eoffset =
	    (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
	sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	while ((f_pos < size)) {
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
		sfi =
		    udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL,
				       NULL, NULL);
		if (!sfi) {
			brelse(dbh);
			return NULL;
		}
		UDF_I_ALLOCTYPE(inode) = alloctype;
		/* Entries now live in the new block, retag them */
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
				 sfi->fileIdent +
				 le16_to_cpu(sfi->lengthOfImpUse))) {
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
			brelse(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	/* Clear the old in-ICB data and record the new single extent */
	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0,
	       UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum =
	    UDF_I_LOCATION(inode).partitionReferenceNum;
	elen = inode->i_size;
	UDF_I_LENEXTENTS(inode) = elen;
	epos.bh = NULL;
	epos.block = UDF_I_LOCATION(inode);
	epos.offset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &epos, eloc, elen, 0);
	/* UniqueID stuff */

	brelse(epos.bh);
	mark_inode_dirty(inode);
	return dbh;
}
/*
 * udf_get_block
 *
 * Map logical file @block to a physical block for the generic block
 * I/O helpers.  Without @create this is a plain lookup via
 * udf_block_map(); with @create a block is allocated through
 * inode_getblk() when necessary, and bh_result is flagged new when a
 * fresh block was handed out.
 */
static int udf_get_block(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	int err, new;
	struct buffer_head *bh;
	unsigned long phys;

	if (!create) {
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	bh = NULL;

	lock_kernel();

	/* NOTE(review): sector_t is unsigned, so this check appears to be
	 * dead code on typical configs - confirm before relying on it */
	if (block < 0)
		goto abort_negative;

	/* Sequential write detected: advance the allocation goal */
	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1) {
		UDF_I_NEXT_ALLOC_BLOCK(inode)++;
		UDF_I_NEXT_ALLOC_GOAL(inode)++;
	}

	err = 0;
	bh = inode_getblk(inode, block, &err, &phys, &new);
	BUG_ON(bh);	/* inode_getblk() always returns NULL */
	if (err)
		goto abort;
	BUG_ON(!phys);

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);
abort:
	unlock_kernel();
	return err;
abort_negative:
	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
	goto abort;
}
  303. static struct buffer_head *udf_getblk(struct inode *inode, long block,
  304. int create, int *err)
  305. {
  306. struct buffer_head dummy;
  307. dummy.b_state = 0;
  308. dummy.b_blocknr = -1000;
  309. *err = udf_get_block(inode, block, &dummy, create);
  310. if (!*err && buffer_mapped(&dummy)) {
  311. struct buffer_head *bh;
  312. bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
  313. if (buffer_new(&dummy)) {
  314. lock_buffer(bh);
  315. memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
  316. set_buffer_uptodate(bh);
  317. unlock_buffer(bh);
  318. mark_buffer_dirty_inode(bh, inode);
  319. }
  320. return bh;
  321. }
  322. return NULL;
  323. }
  324. /* Extend the file by 'blocks' blocks, return the number of extents added */
/* Extend the file by 'blocks' blocks, return the number of extents added */
/*
 * udf_extend_file
 *
 * @inode:    inode being extended
 * @last_pos: position of the file's last extent descriptor; on return
 *            it points at the last extent written
 * @last_ext: the file's current last extent (may be a "fake" one with
 *            zero length)
 * @blocks:   number of blocks to add
 *
 * Rounds the last extent up to a whole block, detaches a trailing
 * preallocated extent so it can be re-attached at the new end, then
 * covers the requested range with NOT_RECORDED_NOT_ALLOCATED (hole)
 * extents, each limited to just under 1GB (the UDF extent-length
 * limit).  Returns the number of extents added, or -1 on failure.
 */
int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
		    kernel_long_ad * last_ext, sector_t blocks)
{
	sector_t add;
	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	struct super_block *sb = inode->i_sb;
	kernel_lb_addr prealloc_loc = { 0, 0 };
	int prealloc_len = 0;

	/* The previous extent is fake and we should not extend by anything
	 * - there's nothing to do... */
	if (!blocks && fake)
		return 0;

	/* Round the last extent up to a multiple of block size */
	if (last_ext->extLength & (sb->s_blocksize - 1)) {
		last_ext->extLength =
		    (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
		    (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
		      sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
		UDF_I_LENEXTENTS(inode) =
		    (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);
	}

	/* Last extent are just preallocated blocks? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
	    EXT_NOT_RECORDED_ALLOCATED) {
		/* Save the extent so that we can reattach it to the end */
		prealloc_loc = last_ext->extLocation;
		prealloc_len = last_ext->extLength;
		/* Mark the extent as a hole */
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
		    (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
		last_ext->extLocation.logicalBlockNum = 0;
		last_ext->extLocation.partitionReferenceNum = 0;
	}

	/* Can we merge with the previous extent? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
	    EXT_NOT_RECORDED_NOT_ALLOCATED) {
		/* Grow the hole, but never past the 1GB length limit */
		add =
		    ((1 << 30) - sb->s_blocksize -
		     (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> sb->
		    s_blocksize_bits;
		if (add > blocks)
			add = blocks;
		blocks -= add;
		last_ext->extLength += add << sb->s_blocksize_bits;
	}

	/* A fake extent must be added; a real one is rewritten in place */
	if (fake) {
		udf_add_aext(inode, last_pos, last_ext->extLocation,
			     last_ext->extLength, 1);
		count++;
	} else
		udf_write_aext(inode, last_pos, last_ext->extLocation,
			       last_ext->extLength, 1);

	/* Managed to do everything necessary? */
	if (!blocks)
		goto out;

	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
	last_ext->extLocation.logicalBlockNum = 0;
	last_ext->extLocation.partitionReferenceNum = 0;
	add = (1 << (30 - sb->s_blocksize_bits)) - 1;
	last_ext->extLength =
	    EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);

	/* Create enough extents to cover the whole hole */
	while (blocks > add) {
		blocks -= add;
		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
				 last_ext->extLength, 1) == -1)
			return -1;
		count++;
	}
	if (blocks) {
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
		    (blocks << sb->s_blocksize_bits);
		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
				 last_ext->extLength, 1) == -1)
			return -1;
		count++;
	}
out:
	/* Do we have some preallocated blocks saved? */
	if (prealloc_len) {
		if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1)
		    == -1)
			return -1;
		last_ext->extLocation = prealloc_loc;
		last_ext->extLength = prealloc_len;
		count++;
	}

	/* last_pos should point to the last written extent... */
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		last_pos->offset -= sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		last_pos->offset -= sizeof(long_ad);
	else
		return -1;

	return count;
}
  422. static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
  423. int *err, long *phys, int *new)
  424. {
  425. static sector_t last_block;
  426. struct buffer_head *result = NULL;
  427. kernel_long_ad laarr[EXTENT_MERGE_SIZE];
  428. struct extent_position prev_epos, cur_epos, next_epos;
  429. int count = 0, startnum = 0, endnum = 0;
  430. uint32_t elen = 0, tmpelen;
  431. kernel_lb_addr eloc, tmpeloc;
  432. int c = 1;
  433. loff_t lbcount = 0, b_off = 0;
  434. uint32_t newblocknum, newblock;
  435. sector_t offset = 0;
  436. int8_t etype;
  437. int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
  438. int lastblock = 0;
  439. prev_epos.offset = udf_file_entry_alloc_offset(inode);
  440. prev_epos.block = UDF_I_LOCATION(inode);
  441. prev_epos.bh = NULL;
  442. cur_epos = next_epos = prev_epos;
  443. b_off = (loff_t) block << inode->i_sb->s_blocksize_bits;
  444. /* find the extent which contains the block we are looking for.
  445. alternate between laarr[0] and laarr[1] for locations of the
  446. current extent, and the previous extent */
  447. do {
  448. if (prev_epos.bh != cur_epos.bh) {
  449. brelse(prev_epos.bh);
  450. get_bh(cur_epos.bh);
  451. prev_epos.bh = cur_epos.bh;
  452. }
  453. if (cur_epos.bh != next_epos.bh) {
  454. brelse(cur_epos.bh);
  455. get_bh(next_epos.bh);
  456. cur_epos.bh = next_epos.bh;
  457. }
  458. lbcount += elen;
  459. prev_epos.block = cur_epos.block;
  460. cur_epos.block = next_epos.block;
  461. prev_epos.offset = cur_epos.offset;
  462. cur_epos.offset = next_epos.offset;
  463. if ((etype =
  464. udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
  465. break;
  466. c = !c;
  467. laarr[c].extLength = (etype << 30) | elen;
  468. laarr[c].extLocation = eloc;
  469. if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
  470. pgoal = eloc.logicalBlockNum +
  471. ((elen + inode->i_sb->s_blocksize - 1) >>
  472. inode->i_sb->s_blocksize_bits);
  473. count++;
  474. } while (lbcount + elen <= b_off);
  475. b_off -= lbcount;
  476. offset = b_off >> inode->i_sb->s_blocksize_bits;
  477. /*
  478. * Move prev_epos and cur_epos into indirect extent if we are at
  479. * the pointer to it
  480. */
  481. udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
  482. udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
  483. /* if the extent is allocated and recorded, return the block
  484. if the extent is not a multiple of the blocksize, round up */
  485. if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
  486. if (elen & (inode->i_sb->s_blocksize - 1)) {
  487. elen = EXT_RECORDED_ALLOCATED |
  488. ((elen + inode->i_sb->s_blocksize - 1) &
  489. ~(inode->i_sb->s_blocksize - 1));
  490. etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
  491. }
  492. brelse(prev_epos.bh);
  493. brelse(cur_epos.bh);
  494. brelse(next_epos.bh);
  495. newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
  496. *phys = newblock;
  497. return NULL;
  498. }
  499. last_block = block;
  500. /* Are we beyond EOF? */
  501. if (etype == -1) {
  502. int ret;
  503. if (count) {
  504. if (c)
  505. laarr[0] = laarr[1];
  506. startnum = 1;
  507. } else {
  508. /* Create a fake extent when there's not one */
  509. memset(&laarr[0].extLocation, 0x00,
  510. sizeof(kernel_lb_addr));
  511. laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
  512. /* Will udf_extend_file() create real extent from a fake one? */
  513. startnum = (offset > 0);
  514. }
  515. /* Create extents for the hole between EOF and offset */
  516. ret = udf_extend_file(inode, &prev_epos, laarr, offset);
  517. if (ret == -1) {
  518. brelse(prev_epos.bh);
  519. brelse(cur_epos.bh);
  520. brelse(next_epos.bh);
  521. /* We don't really know the error here so we just make
  522. * something up */
  523. *err = -ENOSPC;
  524. return NULL;
  525. }
  526. c = 0;
  527. offset = 0;
  528. count += ret;
  529. /* We are not covered by a preallocated extent? */
  530. if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
  531. EXT_NOT_RECORDED_ALLOCATED) {
  532. /* Is there any real extent? - otherwise we overwrite
  533. * the fake one... */
  534. if (count)
  535. c = !c;
  536. laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
  537. inode->i_sb->s_blocksize;
  538. memset(&laarr[c].extLocation, 0x00,
  539. sizeof(kernel_lb_addr));
  540. count++;
  541. endnum++;
  542. }
  543. endnum = c + 1;
  544. lastblock = 1;
  545. } else {
  546. endnum = startnum = ((count > 2) ? 2 : count);
  547. /* if the current extent is in position 0, swap it with the previous */
  548. if (!c && count != 1) {
  549. laarr[2] = laarr[0];
  550. laarr[0] = laarr[1];
  551. laarr[1] = laarr[2];
  552. c = 1;
  553. }
  554. /* if the current block is located in an extent, read the next extent */
  555. if ((etype =
  556. udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) {
  557. laarr[c + 1].extLength = (etype << 30) | elen;
  558. laarr[c + 1].extLocation = eloc;
  559. count++;
  560. startnum++;
  561. endnum++;
  562. } else {
  563. lastblock = 1;
  564. }
  565. }
  566. /* if the current extent is not recorded but allocated, get the
  567. block in the extent corresponding to the requested block */
  568. if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
  569. newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
  570. else { /* otherwise, allocate a new block */
  571. if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
  572. goal = UDF_I_NEXT_ALLOC_GOAL(inode);
  573. if (!goal) {
  574. if (!(goal = pgoal))
  575. goal =
  576. UDF_I_LOCATION(inode).logicalBlockNum + 1;
  577. }
  578. if (!(newblocknum = udf_new_block(inode->i_sb, inode,
  579. UDF_I_LOCATION(inode).
  580. partitionReferenceNum, goal,
  581. err))) {
  582. brelse(prev_epos.bh);
  583. *err = -ENOSPC;
  584. return NULL;
  585. }
  586. UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
  587. }
  588. /* if the extent the requsted block is located in contains multiple blocks,
  589. split the extent into at most three extents. blocks prior to requested
  590. block, requested block, and blocks after requested block */
  591. udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
  592. #ifdef UDF_PREALLOCATE
  593. /* preallocate blocks */
  594. udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
  595. #endif
  596. /* merge any continuous blocks in laarr */
  597. udf_merge_extents(inode, laarr, &endnum);
  598. /* write back the new extents, inserting new extents if the new number
  599. of extents is greater than the old number, and deleting extents if
  600. the new number of extents is less than the old number */
  601. udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
  602. brelse(prev_epos.bh);
  603. if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
  604. UDF_I_LOCATION(inode).
  605. partitionReferenceNum, 0))) {
  606. return NULL;
  607. }
  608. *phys = newblock;
  609. *err = 0;
  610. *new = 1;
  611. UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
  612. UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
  613. inode->i_ctime = current_fs_time(inode->i_sb);
  614. if (IS_SYNC(inode))
  615. udf_sync_inode(inode);
  616. else
  617. mark_inode_dirty(inode);
  618. return result;
  619. }
/*
 * udf_split_extents
 *
 * Split the (not-recorded) extent at laarr[*c] around the newly
 * allocated block @newblocknum, producing up to three extents: the
 * blocks before @offset, the single recorded+allocated new block, and
 * the blocks after it.  *c and *endnum are updated to track the new
 * position of the current extent and the array length.
 */
static void udf_split_extents(struct inode *inode, int *c, int offset,
			      int newblocknum,
			      kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) ==
	    (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		/* Extent length in blocks, rounded up */
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    inode->i_sb->s_blocksize -
			    1) >> inode->i_sb->s_blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		/* Shift the following entries to make room: one slot if the
		 * split yields two extents, two slots if it yields three */
		if (blen == 1) ;
		else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			/* Head part: blocks before the requested block */
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				/* Was preallocated - free the head and turn
				 * it into a hole */
				udf_free_blocks(inode->i_sb, inode,
						laarr[curr].extLocation, 0,
						offset);
				laarr[curr].extLength =
				    EXT_NOT_RECORDED_NOT_ALLOCATED | (offset <<
								      inode->
								      i_sb->
								      s_blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.partitionReferenceNum =
				    0;
			} else
				laarr[curr].extLength = (etype << 30) |
				    (offset << inode->i_sb->s_blocksize_bits);
			curr++;
			(*c)++;
			(*endnum)++;
		}

		/* The requested block itself, now recorded+allocated */
		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
			    UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
		    inode->i_sb->s_blocksize;
		curr++;

		/* Tail part: blocks after the requested block, if any */
		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum +=
				    (offset + 1);
			laarr[curr].extLength =
			    (etype << 30) | ((blen - (offset + 1)) << inode->
					     i_sb->s_blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}
/*
 * udf_prealloc_extents - preallocate blocks behind the extent at laarr[c].
 *
 * Scans forward from laarr[c] for extents that can absorb a preallocation
 * (an already-allocated-unrecorded extent right after c, plus trailing
 * not-allocated extents / end-of-file space), asks the allocator for up to
 * UDF_DEFAULT_PREALLOC_BLOCKS blocks, then rewrites the extent array to
 * reflect what was actually obtained.  @lastblock indicates the write is
 * at the current end of file.
 */
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
				 int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;	/* nothing beyond c to extend into */
		else
			start = c;
	} else {
		/* Extent after c already preallocated (allocated, not
		 * recorded): extend it instead of creating a new one. */
		if ((laarr[c + 1].extLength >> 30) ==
		    (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
			    (((laarr[c + 1].
			       extLength & UDF_EXTENT_LENGTH_MASK) +
			      inode->i_sb->s_blocksize -
			      1) >> inode->i_sb->s_blocksize_bits);
		} else
			start = c;
	}

	/* Count how many blocks of "hole" follow that we could fill. */
	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
			   (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			length +=
			    (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
			      inode->i_sb->s_blocksize -
			      1) >> inode->i_sb->s_blocksize_bits);
		else
			break;	/* recorded extent terminates the run */
	}

	if (length) {
		/* First physical block just past the 'start' extent. */
		int next = laarr[start].extLocation.logicalBlockNum +
		    (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
		      inode->i_sb->s_blocksize -
		      1) >> inode->i_sb->s_blocksize_bits);
		/* Ask for at most UDF_DEFAULT_PREALLOC_BLOCKS, minus what
		 * is already preallocated (currlength). */
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
						   laarr[start].extLocation.
						   partitionReferenceNum,
						   next,
						   (UDF_DEFAULT_PREALLOC_BLOCKS
						    > length ? length :
						    UDF_DEFAULT_PREALLOC_BLOCKS)
						   - currlength);

		if (numalloc) {
			if (start == (c + 1))
				/* Grow the existing preallocated extent. */
				laarr[start].extLength +=
				    (numalloc <<
				     inode->i_sb->s_blocksize_bits);
			else {
				/* Insert a fresh preallocated extent after
				 * c, shifting the tail of the array. */
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(long_ad) * (*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum =
				    next;
				laarr[c + 1].extLocation.partitionReferenceNum
				    = laarr[c].extLocation.
				    partitionReferenceNum;
				laarr[c + 1].extLength =
				    EXT_NOT_RECORDED_ALLOCATED | (numalloc <<
								  inode->i_sb->
								  s_blocksize_bits);
				start = c + 1;
			}

			/* Shrink/remove the following not-allocated extents
			 * that the preallocation now covers. */
			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen =
				    ((laarr[i].
				      extLength & UDF_EXTENT_LENGTH_MASK) +
				     inode->i_sb->s_blocksize -
				     1) >> inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
					    (numalloc << inode->i_sb->
					     s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i],
							&laarr[i + 1],
							sizeof(long_ad) *
							(*endnum - (i + 1)));
					i--;	/* re-examine shifted slot */
					(*endnum)--;
				}
			}
			/* NOTE(review): numalloc has been consumed by the
			 * loop above; any remainder extends the file's
			 * recorded extent length. */
			UDF_I_LENEXTENTS(inode) +=
			    numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
/*
 * udf_merge_extents - coalesce adjacent compatible extents in laarr.
 *
 * Walks the in-memory extent array and merges neighbouring extents when
 * they have the same type and are either both unrecorded-unallocated or
 * physically contiguous.  An extent may hold at most
 * UDF_EXTENT_LENGTH_MASK bytes; when a merge would overflow that, the
 * first extent is topped up to the maximum and the second keeps the
 * remainder.  Also discards allocated-but-unrecorded extents that are
 * followed by (or stand alone as) holes, returning their blocks to the
 * free space map.  *endnum is decremented for every removed entry.
 */
static void udf_merge_extents(struct inode *inode,
			      kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	int i;

	for (i = 0; i < (*endnum - 1); i++) {
		if ((laarr[i].extLength >> 30) ==
		    (laarr[i + 1].extLength >> 30)) {
			/* Same extent type: mergeable if both are holes, or
			 * if the second starts exactly where the first ends. */
			if (((laarr[i].extLength >> 30) ==
			     (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			    ||
			    ((laarr[i + 1].extLocation.logicalBlockNum -
			      laarr[i].extLocation.logicalBlockNum) ==
			     (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
			       inode->i_sb->s_blocksize -
			       1) >> inode->i_sb->s_blocksize_bits))) {
				/* Would the combined length overflow the
				 * 30-bit extent length field? */
				if (((laarr[i].
				      extLength & UDF_EXTENT_LENGTH_MASK) +
				     (laarr[i + 1].
				      extLength & UDF_EXTENT_LENGTH_MASK) +
				     inode->i_sb->s_blocksize -
				     1) & ~UDF_EXTENT_LENGTH_MASK) {
					/* Yes: fill extent i to the maximum
					 * block-aligned length and push the
					 * surplus into extent i+1. */
					laarr[i + 1].extLength =
					    (laarr[i + 1].extLength -
					     (laarr[i].
					      extLength &
					      UDF_EXTENT_LENGTH_MASK) +
					     UDF_EXTENT_LENGTH_MASK) & ~(inode->
									i_sb->
									s_blocksize
									- 1);
					laarr[i].extLength =
					    (laarr[i].
					     extLength & UDF_EXTENT_FLAG_MASK) +
					    (UDF_EXTENT_LENGTH_MASK + 1) -
					    inode->i_sb->s_blocksize;
					laarr[i +
					      1].extLocation.logicalBlockNum =
					    laarr[i].extLocation.
					    logicalBlockNum +
					    ((laarr[i].
					      extLength &
					      UDF_EXTENT_LENGTH_MASK) >> inode->
					     i_sb->s_blocksize_bits);
				} else {
					/* No overflow: absorb i+1 into i and
					 * close the gap in the array. */
					laarr[i].extLength =
					    laarr[i + 1].extLength +
					    (((laarr[i].
					       extLength &
					       UDF_EXTENT_LENGTH_MASK) +
					      inode->i_sb->s_blocksize -
					      1) & ~(inode->i_sb->s_blocksize -
						     1));
					if (*endnum > (i + 2))
						memmove(&laarr[i + 1],
							&laarr[i + 2],
							sizeof(long_ad) *
							(*endnum - (i + 2)));
					i--;	/* retry merge at this slot */
					(*endnum)--;
				}
			}
		} else
		    if (((laarr[i].extLength >> 30) ==
			 (EXT_NOT_RECORDED_ALLOCATED >> 30))
			&& ((laarr[i + 1].extLength >> 30) ==
			    (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			/* Allocated-unrecorded followed by a hole: free the
			 * blocks and merge the two into one hole extent. */
			udf_free_blocks(inode->i_sb, inode,
					laarr[i].extLocation, 0,
					((laarr[i].
					  extLength & UDF_EXTENT_LENGTH_MASK) +
					 inode->i_sb->s_blocksize -
					 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
			     (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) +
			     inode->i_sb->s_blocksize -
			     1) & ~UDF_EXTENT_LENGTH_MASK) {
				/* Combined length overflows: same top-up
				 * scheme as above. */
				laarr[i + 1].extLength =
				    (laarr[i + 1].extLength -
				     (laarr[i].
				      extLength & UDF_EXTENT_LENGTH_MASK) +
				     UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->
								 s_blocksize -
								 1);
				laarr[i].extLength =
				    (laarr[i].
				     extLength & UDF_EXTENT_FLAG_MASK) +
				    (UDF_EXTENT_LENGTH_MASK + 1) -
				    inode->i_sb->s_blocksize;
			} else {
				laarr[i].extLength = laarr[i + 1].extLength +
				    (((laarr[i].
				       extLength & UDF_EXTENT_LENGTH_MASK) +
				      inode->i_sb->s_blocksize -
				      1) & ~(inode->i_sb->s_blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(long_ad) * (*endnum -
								   (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((laarr[i].extLength >> 30) ==
			   (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			/* Lone allocated-unrecorded extent: free its blocks
			 * and demote it to a hole of the same length. */
			udf_free_blocks(inode->i_sb, inode,
					laarr[i].extLocation, 0,
					((laarr[i].
					  extLength & UDF_EXTENT_LENGTH_MASK) +
					 inode->i_sb->s_blocksize -
					 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			laarr[i].extLength =
			    (laarr[i].
			     extLength & UDF_EXTENT_LENGTH_MASK) |
			    EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}
  894. static void udf_update_extents(struct inode *inode,
  895. kernel_long_ad laarr[EXTENT_MERGE_SIZE],
  896. int startnum, int endnum,
  897. struct extent_position *epos)
  898. {
  899. int start = 0, i;
  900. kernel_lb_addr tmploc;
  901. uint32_t tmplen;
  902. if (startnum > endnum) {
  903. for (i = 0; i < (startnum - endnum); i++)
  904. udf_delete_aext(inode, *epos, laarr[i].extLocation,
  905. laarr[i].extLength);
  906. } else if (startnum < endnum) {
  907. for (i = 0; i < (endnum - startnum); i++) {
  908. udf_insert_aext(inode, *epos, laarr[i].extLocation,
  909. laarr[i].extLength);
  910. udf_next_aext(inode, epos, &laarr[i].extLocation,
  911. &laarr[i].extLength, 1);
  912. start++;
  913. }
  914. }
  915. for (i = start; i < endnum; i++) {
  916. udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
  917. udf_write_aext(inode, epos, laarr[i].extLocation,
  918. laarr[i].extLength, 1);
  919. }
  920. }
  921. struct buffer_head *udf_bread(struct inode *inode, int block,
  922. int create, int *err)
  923. {
  924. struct buffer_head *bh = NULL;
  925. bh = udf_getblk(inode, block, create, err);
  926. if (!bh)
  927. return NULL;
  928. if (buffer_uptodate(bh))
  929. return bh;
  930. ll_rw_block(READ, 1, &bh);
  931. wait_on_buffer(bh);
  932. if (buffer_uptodate(bh))
  933. return bh;
  934. brelse(bh);
  935. *err = -EIO;
  936. return NULL;
  937. }
/*
 * udf_truncate - truncate an inode to its current i_size.
 *
 * Only regular files, directories and symlinks are truncated; append-only
 * and immutable inodes are left alone.  For data stored inside the ICB
 * the tail of the in-core data area is simply zeroed (or the file is
 * first expanded to extent form when i_size no longer fits in the ICB).
 * Otherwise the partial last page is zeroed and the extent chain is cut.
 * Runs under the big kernel lock.
 */
void udf_truncate(struct inode *inode)
{
	int offset;
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	lock_kernel();
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
		if (inode->i_sb->s_blocksize <
		    (udf_file_entry_alloc_offset(inode) + inode->i_size)) {
			/* New size no longer fits in the ICB: convert to
			 * extent-based allocation first. */
			udf_expand_file_adinicb(inode, inode->i_size, &err);
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) {
				/* Expansion failed (still in-ICB): fall back
				 * to whatever is actually allocated. */
				inode->i_size = UDF_I_LENALLOC(inode);
				unlock_kernel();
				return;
			} else
				udf_truncate_extents(inode);
		} else {
			/* Data stays in the ICB: zero everything past the
			 * new end of file. */
			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) +
			       offset, 0x00,
			       inode->i_sb->s_blocksize - offset -
			       udf_file_entry_alloc_offset(inode));
			UDF_I_LENALLOC(inode) = inode->i_size;
		}
	} else {
		/* Zero the tail of the last partial block, then drop the
		 * extents beyond i_size. */
		block_truncate_page(inode->i_mapping, inode->i_size,
				    udf_get_block);
		udf_truncate_extents(inode);
	}

	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	unlock_kernel();
}
/*
 * __udf_read_inode - read the on-disc (extended) file entry for an inode.
 *
 * Reads the tagged block at UDF_I_LOCATION(inode) and hands it to
 * udf_fill_inode().  Strategy type 4096 means the entry is reached via an
 * indirect entry: in that case the ICB location is rewritten to the
 * indirect target and the function recurses.  Any failure (unreadable
 * block, unexpected tag ident, unsupported strategy) marks the inode bad.
 */
static void __udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *	i_sb = sb
	 *	i_no = ino
	 *	i_flags = sb->s_flags
	 *	i_state = 0
	 * clean_inode(): zero fills and sets
	 *	i_count = 1
	 *	i_nlink = 1
	 *	i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
	if (!bh) {
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
		       inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	/* Only (extended) file entries and unallocated space entries are
	 * valid ICB contents here. */
	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
	    ident != TAG_IDENT_USE) {
		printk(KERN_ERR
		       "udf: udf_read_inode(ino %ld) failed ident=%d\n",
		       inode->i_ino, ident);
		brelse(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct fileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4096) {
		/* Strategy 4096: follow the indirect entry in the next
		 * block of the ICB to the real file entry. */
		struct buffer_head *ibh = NULL, *nbh = NULL;
		struct indirectEntry *ie;

		ibh =
		    udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1,
				     &ident);
		if (ident == TAG_IDENT_IE) {
			if (ibh) {
				kernel_lb_addr loc;
				ie = (struct indirectEntry *)ibh->b_data;
				loc = lelb_to_cpu(ie->indirectICB.extLocation);

				if (ie->indirectICB.extLength &&
				    (nbh =
				     udf_read_ptagged(inode->i_sb, loc, 0,
						      &ident))) {
					if (ident == TAG_IDENT_FE
					    || ident == TAG_IDENT_EFE) {
						/* Redirect the inode to the
						 * indirect target and retry
						 * from scratch. */
						memcpy(&UDF_I_LOCATION(inode),
						       &loc,
						       sizeof(kernel_lb_addr));
						brelse(bh);
						brelse(ibh);
						brelse(nbh);
						__udf_read_inode(inode);
						return;
					} else {
						brelse(nbh);
						brelse(ibh);
					}
				} else
					brelse(ibh);
			}
		} else
			brelse(ibh);
	} else if (le16_to_cpu(fe->icbTag.strategyType) != 4) {
		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
		       le16_to_cpu(fe->icbTag.strategyType));
		brelse(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);
	brelse(bh);
}
/*
 * udf_fill_inode - populate the in-core inode from an on-disc entry.
 *
 * @bh holds a fileEntry, extendedFileEntry or unallocSpaceEntry (already
 * tag-verified by the caller).  Copies the allocation descriptors / data
 * area into UDF_I_DATA, converts owner, permissions, sizes and times from
 * little-endian on-disc form, and selects inode/file/address-space
 * operations according to the ICB file type.  Failures (allocation,
 * unknown file type, missing device spec) mark the inode bad.
 */
static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
{
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	time_t convtime;
	long convtime_usec;
	int offset;

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
		UDF_I_STRAT4096(inode) = 0;
	else			/* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
		UDF_I_STRAT4096(inode) = 1;

	UDF_I_ALLOCTYPE(inode) =
	    le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
	UDF_I_UNIQUE(inode) = 0;
	UDF_I_LENEATTR(inode) = 0;
	UDF_I_LENEXTENTS(inode) = 0;
	UDF_I_LENALLOC(inode) = 0;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;

	/* Copy the variable-length tail of the entry (extended attributes
	 * plus allocation descriptors) into the in-core buffer. */
	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE) {
		UDF_I_EFE(inode) = 1;
		UDF_I_USE(inode) = 0;
		if (udf_alloc_i_data
		    (inode,
		     inode->i_sb->s_blocksize -
		     sizeof(struct extendedFileEntry))) {
			make_bad_inode(inode);
			return;
		}
		memcpy(UDF_I_DATA(inode),
		       bh->b_data + sizeof(struct extendedFileEntry),
		       inode->i_sb->s_blocksize -
		       sizeof(struct extendedFileEntry));
	} else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE) {
		UDF_I_EFE(inode) = 0;
		UDF_I_USE(inode) = 0;
		if (udf_alloc_i_data
		    (inode,
		     inode->i_sb->s_blocksize - sizeof(struct fileEntry))) {
			make_bad_inode(inode);
			return;
		}
		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry),
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
	} else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) {
		/* Unallocated space entry: only the allocation descriptors
		 * matter; no further inode attributes to decode. */
		UDF_I_EFE(inode) = 0;
		UDF_I_USE(inode) = 1;
		UDF_I_LENALLOC(inode) =
		    le32_to_cpu(((struct unallocSpaceEntry *)bh->b_data)->
				lengthAllocDescs);
		if (udf_alloc_i_data
		    (inode,
		     inode->i_sb->s_blocksize -
		     sizeof(struct unallocSpaceEntry))) {
			make_bad_inode(inode);
			return;
		}
		memcpy(UDF_I_DATA(inode),
		       bh->b_data + sizeof(struct unallocSpaceEntry),
		       inode->i_sb->s_blocksize -
		       sizeof(struct unallocSpaceEntry));
		return;
	}

	/* Owner/group: -1 on disc (or mount option) means "use the value
	 * configured at mount time". */
	inode->i_uid = le32_to_cpu(fe->uid);
	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
						 UDF_FLAG_UID_IGNORE))
		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;

	inode->i_gid = le32_to_cpu(fe->gid);
	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
						 UDF_FLAG_GID_IGNORE))
		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;

	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
	if (!inode->i_nlink)
		inode->i_nlink = 1;

	inode->i_size = le64_to_cpu(fe->informationLength);
	UDF_I_LENEXTENTS(inode) = inode->i_size;

	inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;

	/* Times, unique id and descriptor lengths live at different
	 * offsets in FE vs EFE; decode the right variant. */
	if (UDF_I_EFE(inode) == 0) {
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
		    (inode->i_sb->s_blocksize_bits - 9);

		if (udf_stamp_to_time(&convtime, &convtime_usec,
				      lets_to_cpu(fe->accessTime))) {
			inode->i_atime.tv_sec = convtime;
			inode->i_atime.tv_nsec = convtime_usec * 1000;
		} else {
			/* Unconvertible timestamp: fall back to the volume
			 * recording time. */
			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if (udf_stamp_to_time(&convtime, &convtime_usec,
				      lets_to_cpu(fe->modificationTime))) {
			inode->i_mtime.tv_sec = convtime;
			inode->i_mtime.tv_nsec = convtime_usec * 1000;
		} else {
			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if (udf_stamp_to_time(&convtime, &convtime_usec,
				      lets_to_cpu(fe->attrTime))) {
			inode->i_ctime.tv_sec = convtime;
			inode->i_ctime.tv_nsec = convtime_usec * 1000;
		} else {
			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
	} else {
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
		    (inode->i_sb->s_blocksize_bits - 9);

		if (udf_stamp_to_time(&convtime, &convtime_usec,
				      lets_to_cpu(efe->accessTime))) {
			inode->i_atime.tv_sec = convtime;
			inode->i_atime.tv_nsec = convtime_usec * 1000;
		} else {
			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if (udf_stamp_to_time(&convtime, &convtime_usec,
				      lets_to_cpu(efe->modificationTime))) {
			inode->i_mtime.tv_sec = convtime;
			inode->i_mtime.tv_nsec = convtime_usec * 1000;
		} else {
			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if (udf_stamp_to_time(&convtime, &convtime_usec,
				      lets_to_cpu(efe->createTime))) {
			UDF_I_CRTIME(inode).tv_sec = convtime;
			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
		} else {
			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if (udf_stamp_to_time(&convtime, &convtime_usec,
				      lets_to_cpu(efe->attrTime))) {
			inode->i_ctime.tv_sec = convtime;
			inode->i_ctime.tv_nsec = convtime_usec * 1000;
		} else {
			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
		offset =
		    sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
	}

	/* Wire up operations according to the on-disc file type. */
	switch (fe->icbTag.fileType) {
	case ICBTAG_FILE_TYPE_DIRECTORY:
		{
			inode->i_op = &udf_dir_inode_operations;
			inode->i_fop = &udf_dir_operations;
			inode->i_mode |= S_IFDIR;
			inc_nlink(inode);
			break;
		}
	case ICBTAG_FILE_TYPE_REALTIME:
	case ICBTAG_FILE_TYPE_REGULAR:
	case ICBTAG_FILE_TYPE_UNDEF:
		{
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
				inode->i_data.a_ops = &udf_adinicb_aops;
			else
				inode->i_data.a_ops = &udf_aops;
			inode->i_op = &udf_file_inode_operations;
			inode->i_fop = &udf_file_operations;
			inode->i_mode |= S_IFREG;
			break;
		}
	case ICBTAG_FILE_TYPE_BLOCK:
		{
			inode->i_mode |= S_IFBLK;
			break;
		}
	case ICBTAG_FILE_TYPE_CHAR:
		{
			inode->i_mode |= S_IFCHR;
			break;
		}
	case ICBTAG_FILE_TYPE_FIFO:
		{
			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
			break;
		}
	case ICBTAG_FILE_TYPE_SOCKET:
		{
			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
			break;
		}
	case ICBTAG_FILE_TYPE_SYMLINK:
		{
			inode->i_data.a_ops = &udf_symlink_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mode = S_IFLNK | S_IRWXUGO;
			break;
		}
	default:
		{
			printk(KERN_ERR
			       "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
			       inode->i_ino, fe->icbTag.fileType);
			make_bad_inode(inode);
			return;
		}
	}

	/* Device nodes: the major/minor numbers live in a Device
	 * Specification extended attribute (attr type 12). */
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct deviceSpec *dsea = (struct deviceSpec *)
		    udf_get_extendedattr(inode, 12, 1);

		if (dsea) {
			init_special_inode(inode, inode->i_mode,
					   MKDEV(le32_to_cpu
						 (dsea->majorDeviceIdent),
						 le32_to_cpu(dsea->
							     minorDeviceIdent)));
			/* Developer ID ??? */
		} else {
			make_bad_inode(inode);
		}
	}
}
  1274. static int udf_alloc_i_data(struct inode *inode, size_t size)
  1275. {
  1276. UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL);
  1277. if (!UDF_I_DATA(inode)) {
  1278. printk(KERN_ERR
  1279. "udf:udf_alloc_i_data (ino %ld) no free memory\n",
  1280. inode->i_ino);
  1281. return -ENOMEM;
  1282. }
  1283. return 0;
  1284. }
  1285. static mode_t udf_convert_permissions(struct fileEntry *fe)
  1286. {
  1287. mode_t mode;
  1288. uint32_t permissions;
  1289. uint32_t flags;
  1290. permissions = le32_to_cpu(fe->permissions);
  1291. flags = le16_to_cpu(fe->icbTag.flags);
  1292. mode = ((permissions) & S_IRWXO) |
  1293. ((permissions >> 2) & S_IRWXG) |
  1294. ((permissions >> 4) & S_IRWXU) |
  1295. ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
  1296. ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
  1297. ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
  1298. return mode;
  1299. }
/*
 * udf_write_inode
 *
 * PURPOSE
 *	Write out the specified inode.
 *
 * DESCRIPTION
 *	This routine is called whenever an inode is synced.  It takes the
 *	big kernel lock and delegates the actual on-disc update to
 *	udf_update_inode().
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 */
  1314. int udf_write_inode(struct inode *inode, int sync)
  1315. {
  1316. int ret;
  1317. lock_kernel();
  1318. ret = udf_update_inode(inode, sync);
  1319. unlock_kernel();
  1320. return ret;
  1321. }
  1322. int udf_sync_inode(struct inode *inode)
  1323. {
  1324. return udf_update_inode(inode, 1);
  1325. }
/*
 * udf_update_inode - serialize the in-core inode back into its on-disc
 * (extended) file entry.
 *
 * Rebuilds the entire descriptor block from scratch: copies the cached
 * allocation descriptors / in-ICB data, converts owner, permissions,
 * link count, sizes and timestamps to little-endian on-disc form, fills
 * in the ICB tag and descriptor tag (CRC + checksum), and marks the
 * buffer dirty.  With @do_sync set the buffer is written out and the
 * result checked.  Returns 0 on success or a negative errno.
 */
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int i;
	kernel_timestamp cpu_time;
	int err = 0;

	bh = udf_tread(inode->i_sb,
		       udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode),
					 0));
	if (!bh) {
		udf_debug("bread failure\n");
		return -EIO;
	}

	/* The descriptor is rebuilt wholesale below. */
	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) {
		/* Unallocated space entry: only allocation descriptors and
		 * the descriptor tag need writing; done early. */
		struct unallocSpaceEntry *use =
		    (struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       UDF_I_DATA(inode),
		       inode->i_sb->s_blocksize -
		       sizeof(struct unallocSpaceEntry));
		crclen =
		    sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
		    sizeof(tag);
		use->descTag.tagLocation =
		    cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
		use->descTag.descCRCLength = cpu_to_le16(crclen);
		use->descTag.descCRC =
		    cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));

		/* Tag checksum: sum of the 16 tag bytes, skipping the
		 * checksum byte itself (offset 4). */
		use->descTag.tagChecksum = 0;
		for (i = 0; i < 16; i++)
			if (i != 4)
				use->descTag.tagChecksum +=
				    ((uint8_t *) & (use->descTag))[i];

		mark_buffer_dirty(bh);
		brelse(bh);
		return err;
	}

	/* Owner/group: the "forget" mount options write -1 to disc. */
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(-1);
	else
		fe->uid = cpu_to_le32(inode->i_uid);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(-1);
	else
		fe->gid = cpu_to_le32(inode->i_gid);

	/* POSIX rwx triplets -> UDF 5-bit permission groups, preserving
	 * the UDF-only delete/chattr bits already on disc. */
	udfperms = ((inode->i_mode & S_IRWXO)) |
	    ((inode->i_mode & S_IRWXG) << 2) | ((inode->i_mode & S_IRWXU) << 4);
	udfperms |= (le32_to_cpu(fe->permissions) &
		     (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
		      FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
		      FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

	/* UDF does not count the directory's "." entry in the link count. */
	if (S_ISDIR(inode->i_mode))
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	/* Device nodes store major/minor in a Device Specification
	 * extended attribute (type 12), created here if missing. */
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		regid *eid;
		struct deviceSpec *dsea = (struct deviceSpec *)
		    udf_get_extendedattr(inode, 12, 1);

		if (!dsea) {
			dsea = (struct deviceSpec *)
			    udf_add_extendedattr(inode,
						 sizeof(struct deviceSpec) +
						 sizeof(regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength =
			    cpu_to_le32(sizeof(struct deviceSpec) +
					sizeof(regid));
			dsea->impUseLength = cpu_to_le32(sizeof(regid));
		}
		eid = (regid *) dsea->impUse;
		memset(eid, 0, sizeof(regid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (UDF_I_EFE(inode) == 0) {
		/* Plain file entry. */
		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode),
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		/* i_blocks is in 512-byte sectors; convert to fs blocks,
		 * rounding up. */
		fe->logicalBlocksRecorded =
		    cpu_to_le64((inode->i_blocks +
				 (1 << (inode->i_sb->s_blocksize_bits - 9)) -
				 1) >> (inode->i_sb->s_blocksize_bits - 9));

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			fe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			fe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			fe->attrTime = cpu_to_lets(cpu_time);
		memset(&(fe->impIdent), 0, sizeof(regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		/* Extended file entry. */
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       UDF_I_DATA(inode),
		       inode->i_sb->s_blocksize -
		       sizeof(struct extendedFileEntry));
		efe->objectSize = cpu_to_le64(inode->i_size);
		efe->logicalBlocksRecorded = cpu_to_le64((inode->i_blocks +
							  (1 <<
							   (inode->i_sb->
							    s_blocksize_bits -
							    9)) -
							  1) >> (inode->i_sb->
								 s_blocksize_bits
								 - 9));

		/* Clamp the cached creation time so it never post-dates
		 * any of atime/mtime/ctime. */
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
		    (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
		     UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec)) {
			UDF_I_CRTIME(inode) = inode->i_atime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
		    (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
		     UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec)) {
			UDF_I_CRTIME(inode) = inode->i_mtime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
		    (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
		     UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec)) {
			UDF_I_CRTIME(inode) = inode->i_ctime;
		}

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			efe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			efe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
			efe->createTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			efe->attrTime = cpu_to_lets(cpu_time);

		memset(&(efe->impIdent), 0, sizeof(regid));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}

	if (UDF_I_STRAT4096(inode)) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	/* Allocation type plus suid/sgid/sticky, preserving all other
	 * flag bits already on disc. */
	icbflags = UDF_I_ALLOCTYPE(inode) |
	    ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
	    ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
	    ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
	    (le16_to_cpu(fe->icbTag.flags) &
	     ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
	       ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);

	/* Descriptor version 3 for UDF >= 2.00 media, else 2. */
	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
	fe->descTag.tagLocation =
	    cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC =
	    cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));

	/* Tag checksum over the 16 tag bytes, skipping byte 4 (the
	 * checksum field itself). */
	fe->descTag.tagChecksum = 0;
	for (i = 0; i < 16; i++)
		if (i != 4)
			fe->descTag.tagChecksum +=
			    ((uint8_t *) & (fe->descTag))[i];

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing udf inode [%s:%08lx]\n",
			       inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);
	return err;
}
/*
 * udf_iget - look up (or read in) the inode at the given ICB location.
 *
 * Uses the physical block number as the inode number for iget_locked().
 * Newly created inodes are filled from disc via __udf_read_inode().
 * Returns NULL if allocation fails, the on-disc entry is bad, or the
 * logical block lies outside its partition.
 */
struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		/* Fresh in-core inode: remember its ICB location and read
		 * the on-disc entry. */
		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
		__udf_read_inode(inode);
		unlock_new_inode(inode);
	}

	if (is_bad_inode(inode))
		goto out_iput;

	/* Sanity check: the ICB must lie inside its partition. */
	if (ino.logicalBlockNum >=
	    UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
		udf_debug("block=%d, partition=%d out of range\n",
			  ino.logicalBlockNum, ino.partitionReferenceNum);
		make_bad_inode(inode);
		goto out_iput;
	}
	return inode;

 out_iput:
	iput(inode);
	return NULL;
}
/*
 * udf_add_aext - append an extent descriptor at the position in @epos
 *
 * Writes (@eloc, @elen) as a new allocation descriptor after the current
 * position.  If the descriptor area (in-ICB data when epos->bh is NULL,
 * otherwise an allocation extent block) cannot hold two more descriptors,
 * a fresh allocation extent block is allocated and chained in first, and
 * @epos is moved into it.
 *
 * Returns the extent type (elen >> 30) on success, or -1 on failure
 * (unsupported allocation type, block allocation failure, or failure to
 * get the new buffer).
 */
int8_t udf_add_aext(struct inode * inode, struct extent_position * epos,
		    kernel_lb_addr eloc, uint32_t elen, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	/* Locate the raw descriptor area: in-ICB data of the file entry,
	 * or the body of an allocation extent block. */
	if (!epos->bh)
		ptr =
		    UDF_I_DATA(inode) + epos->offset -
		    udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = epos->bh->b_data + epos->offset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Need room for the new descriptor AND a continuation descriptor;
	 * otherwise chain in a new allocation extent block. */
	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = epos->block;

		/* Allocate a block for the new AED near the old one */
		if (!
		    (epos->block.logicalBlockNum =
		     udf_new_block(inode->i_sb, NULL,
				   obloc.partitionReferenceNum,
				   obloc.logicalBlockNum, &err))) {
			return -1;
		}
		if (!
		    (nbh =
		     udf_tgetblk(inode->i_sb,
				 udf_get_lb_pblock(inode->i_sb, epos->block,
						   0)))) {
			return -1;
		}
		/* Fresh block: zero it without reading from disk */
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		/* Non-strict mode records a back-pointer to the previous AED */
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation =
			    cpu_to_le32(obloc.logicalBlockNum);
		if (epos->offset + adsize > inode->i_sb->s_blocksize) {
			/* No room even for the continuation descriptor in the
			 * old area: move the last descriptor into the new AED
			 * and write the continuation over its old slot. */
			loffset = epos->offset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			epos->offset = sizeof(struct allocExtDesc) + adsize;
		} else {
			/* Continuation descriptor fits in the old area; the
			 * old area grows by one descriptor. */
			loffset = epos->offset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			epos->offset = sizeof(struct allocExtDesc);

			if (epos->bh) {
				aed = (struct allocExtDesc *)epos->bh->b_data;
				aed->lengthAllocDescs =
				    cpu_to_le32(le32_to_cpu
						(aed->lengthAllocDescs) +
						adsize);
			} else {
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		/* Stamp the descriptor tag; UDF >= 2.00 uses tag version 3 */
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				    epos->block.logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				    epos->block.logicalBlockNum, sizeof(tag));
		/* Write the "next extent of allocation descriptors" pointer
		 * at sptr, in whichever descriptor format the inode uses. */
		switch (UDF_I_ALLOCTYPE(inode)) {
		case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *) sptr;
				sad->extLength =
				    cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						inode->i_sb->s_blocksize);
				sad->extPosition =
				    cpu_to_le32(epos->block.logicalBlockNum);
				break;
			}
		case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *) sptr;
				lad->extLength =
				    cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(epos->block);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		/* Re-checksum the old descriptor area and release it; from
		 * here on @epos points into the new AED buffer. */
		if (epos->bh) {
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
			    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag(epos->bh->b_data, loffset);
			else
				udf_update_tag(epos->bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(epos->bh, inode);
			brelse(epos->bh);
		} else
			mark_inode_dirty(inode);
		epos->bh = nbh;
	}

	/* Now there is guaranteed room: write the new descriptor */
	etype = udf_write_aext(inode, epos, eloc, elen, inc);

	/* Account for the descriptor just written and update the CRC */
	if (!epos->bh) {
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		aed->lengthAllocDescs =
		    cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
		    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag(epos->bh->b_data,
				       epos->offset + (inc ? 0 : adsize));
		else
			udf_update_tag(epos->bh->b_data,
				       sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(epos->bh, inode);
	}

	return etype;
}
  1701. int8_t udf_write_aext(struct inode * inode, struct extent_position * epos,
  1702. kernel_lb_addr eloc, uint32_t elen, int inc)
  1703. {
  1704. int adsize;
  1705. uint8_t *ptr;
  1706. if (!epos->bh)
  1707. ptr =
  1708. UDF_I_DATA(inode) + epos->offset -
  1709. udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
  1710. else
  1711. ptr = epos->bh->b_data + epos->offset;
  1712. switch (UDF_I_ALLOCTYPE(inode)) {
  1713. case ICBTAG_FLAG_AD_SHORT:
  1714. {
  1715. short_ad *sad = (short_ad *) ptr;
  1716. sad->extLength = cpu_to_le32(elen);
  1717. sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
  1718. adsize = sizeof(short_ad);
  1719. break;
  1720. }
  1721. case ICBTAG_FLAG_AD_LONG:
  1722. {
  1723. long_ad *lad = (long_ad *) ptr;
  1724. lad->extLength = cpu_to_le32(elen);
  1725. lad->extLocation = cpu_to_lelb(eloc);
  1726. memset(lad->impUse, 0x00, sizeof(lad->impUse));
  1727. adsize = sizeof(long_ad);
  1728. break;
  1729. }
  1730. default:
  1731. return -1;
  1732. }
  1733. if (epos->bh) {
  1734. if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
  1735. || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) {
  1736. struct allocExtDesc *aed =
  1737. (struct allocExtDesc *)epos->bh->b_data;
  1738. udf_update_tag(epos->bh->b_data,
  1739. le32_to_cpu(aed->lengthAllocDescs) +
  1740. sizeof(struct allocExtDesc));
  1741. }
  1742. mark_buffer_dirty_inode(epos->bh, inode);
  1743. } else
  1744. mark_inode_dirty(inode);
  1745. if (inc)
  1746. epos->offset += adsize;
  1747. return (elen >> 30);
  1748. }
  1749. int8_t udf_next_aext(struct inode * inode, struct extent_position * epos,
  1750. kernel_lb_addr * eloc, uint32_t * elen, int inc)
  1751. {
  1752. int8_t etype;
  1753. while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
  1754. (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
  1755. epos->block = *eloc;
  1756. epos->offset = sizeof(struct allocExtDesc);
  1757. brelse(epos->bh);
  1758. if (!
  1759. (epos->bh =
  1760. udf_tread(inode->i_sb,
  1761. udf_get_lb_pblock(inode->i_sb, epos->block,
  1762. 0)))) {
  1763. udf_debug("reading block %d failed!\n",
  1764. udf_get_lb_pblock(inode->i_sb, epos->block,
  1765. 0));
  1766. return -1;
  1767. }
  1768. }
  1769. return etype;
  1770. }
/*
 * udf_current_aext - decode the allocation descriptor at @epos
 *
 * Reads one short_ad/long_ad from the current position (in-ICB data when
 * epos->bh is NULL, otherwise an allocation extent block) into (*eloc,
 * *elen) and returns the 2-bit extent type from the high bits of the
 * extent length.  A zero epos->offset is taken to mean "start of the
 * descriptor area" and is initialized accordingly.  udf_get_fileshortad/
 * udf_get_filelongad advance epos->offset when @inc is set.
 *
 * Returns the extent type, or -1 when past the end of the descriptor
 * area or for an unsupported allocation type.
 */
int8_t udf_current_aext(struct inode * inode, struct extent_position * epos,
			kernel_lb_addr * eloc, uint32_t * elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;

	if (!epos->bh) {
		/* In-ICB descriptors: offset is relative to the file entry */
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr =
		    UDF_I_DATA(inode) + epos->offset -
		    udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
		/* alen marks the end of the in-ICB descriptor area */
		alen =
		    udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	} else {
		/* Allocation extent block: offset is relative to the AED */
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen =
		    sizeof(struct allocExtDesc) +
		    le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
				lengthAllocDescs);
	}

	switch (UDF_I_ALLOCTYPE(inode)) {
	case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!
			    (sad =
			     udf_get_fileshortad(ptr, alen, &epos->offset,
						 inc)))
				return -1;
			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			/* short_ad carries no partition; use the inode's */
			eloc->partitionReferenceNum =
			    UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen =
			    le32_to_cpu(sad->
					extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
	case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!
			    (lad =
			     udf_get_filelongad(ptr, alen, &epos->offset, inc)))
				return -1;
			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen =
			    le32_to_cpu(lad->
					extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
	default:
		{
			udf_debug("alloc_type = %d unsupported\n",
				  UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}

	return etype;
}
  1835. static int8_t
  1836. udf_insert_aext(struct inode *inode, struct extent_position epos,
  1837. kernel_lb_addr neloc, uint32_t nelen)
  1838. {
  1839. kernel_lb_addr oeloc;
  1840. uint32_t oelen;
  1841. int8_t etype;
  1842. if (epos.bh)
  1843. get_bh(epos.bh);
  1844. while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
  1845. udf_write_aext(inode, &epos, neloc, nelen, 1);
  1846. neloc = oeloc;
  1847. nelen = (etype << 30) | oelen;
  1848. }
  1849. udf_add_aext(inode, &epos, neloc, nelen, 1);
  1850. brelse(epos.bh);
  1851. return (nelen >> 30);
  1852. }
/*
 * udf_delete_aext - delete the extent at @epos from the descriptor list
 *
 * Removes the extent (@eloc, @elen) by shifting every following extent
 * one slot up and blanking the freed tail slot(s).  If the walk crossed
 * into a new allocation extent block, that now-empty block is freed and
 * two tail slots are blanked (the deleted one plus the continuation
 * descriptor).  @epos is taken by value.
 *
 * Returns the type of the (blanked) final extent, or -1 if there is no
 * extent at @epos.
 */
int8_t udf_delete_aext(struct inode * inode, struct extent_position epos,
		       kernel_lb_addr eloc, uint32_t elen)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	/* Two references on purpose: both epos and its copy oepos are
	 * brelse'd independently at the end. */
	if (epos.bh) {
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	oepos = epos;
	/* Skip over the extent being deleted */
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	/* Copy each subsequent extent back one slot (oepos trails epos) */
	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			/* Reader crossed into the next AED; move the trailing
			 * position along, swapping buffer references. */
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	/* Blank descriptor used to clear the vacated tail slot(s) */
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		/* The last AED is now empty: free it and blank both the
		 * deleted slot and the continuation descriptor (hence two
		 * udf_write_aext calls and the 2*adsize accounting). */
		udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
		udf_write_aext(inode, &oepos, eloc, elen, 1);
		udf_write_aext(inode, &oepos, eloc, elen, 1);
		if (!oepos.bh) {
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			aed->lengthAllocDescs =
			    cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
					(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
			    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		/* Same descriptor area throughout: blank one tail slot */
		udf_write_aext(inode, &oepos, eloc, elen, 1);
		if (!oepos.bh) {
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			aed->lengthAllocDescs =
			    cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) -
					adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)
			    || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}
  1930. int8_t inode_bmap(struct inode * inode, sector_t block,
  1931. struct extent_position * pos, kernel_lb_addr * eloc,
  1932. uint32_t * elen, sector_t * offset)
  1933. {
  1934. loff_t lbcount = 0, bcount =
  1935. (loff_t) block << inode->i_sb->s_blocksize_bits;
  1936. int8_t etype;
  1937. if (block < 0) {
  1938. printk(KERN_ERR "udf: inode_bmap: block < 0\n");
  1939. return -1;
  1940. }
  1941. pos->offset = 0;
  1942. pos->block = UDF_I_LOCATION(inode);
  1943. pos->bh = NULL;
  1944. *elen = 0;
  1945. do {
  1946. if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1) {
  1947. *offset =
  1948. (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
  1949. UDF_I_LENEXTENTS(inode) = lbcount;
  1950. return -1;
  1951. }
  1952. lbcount += *elen;
  1953. } while (lbcount <= bcount);
  1954. *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
  1955. return etype;
  1956. }
  1957. long udf_block_map(struct inode *inode, sector_t block)
  1958. {
  1959. kernel_lb_addr eloc;
  1960. uint32_t elen;
  1961. sector_t offset;
  1962. struct extent_position epos = { NULL, 0, {0, 0} };
  1963. int ret;
  1964. lock_kernel();
  1965. if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
  1966. (EXT_RECORDED_ALLOCATED >> 30))
  1967. ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
  1968. else
  1969. ret = 0;
  1970. unlock_kernel();
  1971. brelse(epos.bh);
  1972. if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
  1973. return udf_fixed_to_variable(ret);
  1974. else
  1975. return ret;
  1976. }