super.c
  1. /*
  2. * super.c
  3. *
  4. * PURPOSE
  5. * Super block routines for the OSTA-UDF(tm) filesystem.
  6. *
  7. * DESCRIPTION
  8. * OSTA-UDF(tm) = Optical Storage Technology Association
  9. * Universal Disk Format.
  10. *
  11. * This code is based on version 2.00 of the UDF specification,
  12. * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
  13. * http://www.osta.org/
  14. * http://www.ecma.ch/
  15. * http://www.iso.org/
  16. *
  17. * COPYRIGHT
  18. * This file is distributed under the terms of the GNU General Public
  19. * License (GPL). Copies of the GPL can be obtained from:
  20. * ftp://prep.ai.mit.edu/pub/gnu/GPL
  21. * Each contributing author retains all rights to their own work.
  22. *
  23. * (C) 1998 Dave Boynton
  24. * (C) 1998-2004 Ben Fennema
  25. * (C) 2000 Stelias Computing Inc
  26. *
  27. * HISTORY
  28. *
  29. * 09/24/98 dgb changed to allow compiling outside of kernel, and
  30. * added some debugging.
  31. * 10/01/98 dgb updated to allow (some) possibility of compiling w/2.0.34
  32. * 10/16/98 attempting some multi-session support
  33. * 10/17/98 added freespace count for "df"
  34. * 11/11/98 gr added novrs option
  35. * 11/26/98 dgb added fileset,anchor mount options
  36. * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced
  37. * vol descs. rewrote option handling based on isofs
  38. * 12/20/98 find the free space bitmap (if it exists)
  39. */
  40. #include "udfdecl.h"
  41. #include <linux/blkdev.h>
  42. #include <linux/slab.h>
  43. #include <linux/kernel.h>
  44. #include <linux/module.h>
  45. #include <linux/parser.h>
  46. #include <linux/stat.h>
  47. #include <linux/cdrom.h>
  48. #include <linux/nls.h>
  49. #include <linux/buffer_head.h>
  50. #include <linux/vfs.h>
  51. #include <linux/vmalloc.h>
  52. #include <linux/errno.h>
  53. #include <linux/mount.h>
  54. #include <linux/seq_file.h>
  55. #include <linux/bitmap.h>
  56. #include <linux/crc-itu-t.h>
  57. #include <asm/byteorder.h>
  58. #include "udf_sb.h"
  59. #include "udf_i.h"
  60. #include <linux/init.h>
  61. #include <asm/uaccess.h>
  62. #define VDS_POS_PRIMARY_VOL_DESC 0
  63. #define VDS_POS_UNALLOC_SPACE_DESC 1
  64. #define VDS_POS_LOGICAL_VOL_DESC 2
  65. #define VDS_POS_PARTITION_DESC 3
  66. #define VDS_POS_IMP_USE_VOL_DESC 4
  67. #define VDS_POS_VOL_DESC_PTR 5
  68. #define VDS_POS_TERMINATING_DESC 6
  69. #define VDS_POS_LENGTH 7
  70. #define UDF_DEFAULT_BLOCKSIZE 2048
  71. /* These are the "meat" - everything else is stuffing */
  72. static int udf_fill_super(struct super_block *, void *, int);
  73. static void udf_put_super(struct super_block *);
  74. static int udf_sync_fs(struct super_block *, int);
  75. static int udf_remount_fs(struct super_block *, int *, char *);
  76. static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
  77. static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
  78. struct kernel_lb_addr *);
  79. static void udf_load_fileset(struct super_block *, struct buffer_head *,
  80. struct kernel_lb_addr *);
  81. static void udf_open_lvid(struct super_block *);
  82. static void udf_close_lvid(struct super_block *);
  83. static unsigned int udf_count_free(struct super_block *);
  84. static int udf_statfs(struct dentry *, struct kstatfs *);
  85. static int udf_show_options(struct seq_file *, struct dentry *);
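/*
 * Note on the helper below: the Logical Volume Integrity Descriptor stores a
 * freeSpaceTable[] and a sizeTable[] (one uint32_t per partition each) in
 * front of its implementation-use area, which is why the lookup skips
 * numOfPartitions * 2 * sizeof(uint32_t) bytes into impUse[].
 */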
  86. struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
  87. {
  88. struct logicalVolIntegrityDesc *lvid =
  89. (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
  90. __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
  91. __u32 offset = number_of_partitions * 2 *
  92. sizeof(uint32_t)/sizeof(uint8_t);
  93. return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
  94. }
  95. /* UDF filesystem type */
  96. static struct dentry *udf_mount(struct file_system_type *fs_type,
  97. int flags, const char *dev_name, void *data)
  98. {
  99. return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
  100. }
  101. static struct file_system_type udf_fstype = {
  102. .owner = THIS_MODULE,
  103. .name = "udf",
  104. .mount = udf_mount,
  105. .kill_sb = kill_block_super,
  106. .fs_flags = FS_REQUIRES_DEV,
  107. };
  108. static struct kmem_cache *udf_inode_cachep;
  109. static struct inode *udf_alloc_inode(struct super_block *sb)
  110. {
  111. struct udf_inode_info *ei;
  112. ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
  113. if (!ei)
  114. return NULL;
  115. ei->i_unique = 0;
  116. ei->i_lenExtents = 0;
  117. ei->i_next_alloc_block = 0;
  118. ei->i_next_alloc_goal = 0;
  119. ei->i_strat4096 = 0;
  120. init_rwsem(&ei->i_data_sem);
  121. return &ei->vfs_inode;
  122. }
  123. static void udf_i_callback(struct rcu_head *head)
  124. {
  125. struct inode *inode = container_of(head, struct inode, i_rcu);
  126. kmem_cache_free(udf_inode_cachep, UDF_I(inode));
  127. }
  128. static void udf_destroy_inode(struct inode *inode)
  129. {
  130. call_rcu(&inode->i_rcu, udf_i_callback);
  131. }
  132. static void init_once(void *foo)
  133. {
  134. struct udf_inode_info *ei = (struct udf_inode_info *)foo;
  135. ei->i_ext.i_data = NULL;
  136. inode_init_once(&ei->vfs_inode);
  137. }
  138. static int init_inodecache(void)
  139. {
  140. udf_inode_cachep = kmem_cache_create("udf_inode_cache",
  141. sizeof(struct udf_inode_info),
  142. 0, (SLAB_RECLAIM_ACCOUNT |
  143. SLAB_MEM_SPREAD),
  144. init_once);
  145. if (!udf_inode_cachep)
  146. return -ENOMEM;
  147. return 0;
  148. }
  149. static void destroy_inodecache(void)
  150. {
  151. kmem_cache_destroy(udf_inode_cachep);
  152. }
  153. /* Superblock operations */
  154. static const struct super_operations udf_sb_ops = {
  155. .alloc_inode = udf_alloc_inode,
  156. .destroy_inode = udf_destroy_inode,
  157. .write_inode = udf_write_inode,
  158. .evict_inode = udf_evict_inode,
  159. .put_super = udf_put_super,
  160. .sync_fs = udf_sync_fs,
  161. .statfs = udf_statfs,
  162. .remount_fs = udf_remount_fs,
  163. .show_options = udf_show_options,
  164. };
  165. struct udf_options {
  166. unsigned char novrs;
  167. unsigned int blocksize;
  168. unsigned int session;
  169. unsigned int lastblock;
  170. unsigned int anchor;
  171. unsigned int volume;
  172. unsigned short partition;
  173. unsigned int fileset;
  174. unsigned int rootdir;
  175. unsigned int flags;
  176. umode_t umask;
  177. gid_t gid;
  178. uid_t uid;
  179. umode_t fmode;
  180. umode_t dmode;
  181. struct nls_table *nls_map;
  182. };
  183. static int __init init_udf_fs(void)
  184. {
  185. int err;
  186. err = init_inodecache();
  187. if (err)
  188. goto out1;
  189. err = register_filesystem(&udf_fstype);
  190. if (err)
  191. goto out;
  192. return 0;
  193. out:
  194. destroy_inodecache();
  195. out1:
  196. return err;
  197. }
  198. static void __exit exit_udf_fs(void)
  199. {
  200. unregister_filesystem(&udf_fstype);
  201. destroy_inodecache();
  202. }
  203. module_init(init_udf_fs)
  204. module_exit(exit_udf_fs)
  205. static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
  206. {
  207. struct udf_sb_info *sbi = UDF_SB(sb);
  208. sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map),
  209. GFP_KERNEL);
  210. if (!sbi->s_partmaps) {
  211. udf_err(sb, "Unable to allocate space for %d partition maps\n",
  212. count);
  213. sbi->s_partitions = 0;
  214. return -ENOMEM;
  215. }
  216. sbi->s_partitions = count;
  217. return 0;
  218. }
  219. static int udf_show_options(struct seq_file *seq, struct dentry *root)
  220. {
  221. struct super_block *sb = root->d_sb;
  222. struct udf_sb_info *sbi = UDF_SB(sb);
  223. if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
  224. seq_puts(seq, ",nostrict");
  225. if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
  226. seq_printf(seq, ",bs=%lu", sb->s_blocksize);
  227. if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
  228. seq_puts(seq, ",unhide");
  229. if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
  230. seq_puts(seq, ",undelete");
  231. if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
  232. seq_puts(seq, ",noadinicb");
  233. if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
  234. seq_puts(seq, ",shortad");
  235. if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
  236. seq_puts(seq, ",uid=forget");
  237. if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
  238. seq_puts(seq, ",uid=ignore");
  239. if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
  240. seq_puts(seq, ",gid=forget");
  241. if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
  242. seq_puts(seq, ",gid=ignore");
  243. if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
  244. seq_printf(seq, ",uid=%u", sbi->s_uid);
  245. if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
  246. seq_printf(seq, ",gid=%u", sbi->s_gid);
  247. if (sbi->s_umask != 0)
  248. seq_printf(seq, ",umask=%ho", sbi->s_umask);
  249. if (sbi->s_fmode != UDF_INVALID_MODE)
  250. seq_printf(seq, ",mode=%ho", sbi->s_fmode);
  251. if (sbi->s_dmode != UDF_INVALID_MODE)
  252. seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
  253. if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
  254. seq_printf(seq, ",session=%u", sbi->s_session);
  255. if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
  256. seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
  257. if (sbi->s_anchor != 0)
  258. seq_printf(seq, ",anchor=%u", sbi->s_anchor);
  259. /*
  260. * volume, partition, fileset and rootdir seem to be ignored
  261. * currently
  262. */
  263. if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
  264. seq_puts(seq, ",utf8");
  265. if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
  266. seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
  267. return 0;
  268. }
  269. /*
  270. * udf_parse_options
  271. *
  272. * PURPOSE
  273. * Parse mount options.
  274. *
  275. * DESCRIPTION
  276. * The following mount options are supported:
  277. *
  278. * gid= Set the default group.
  279. * umask= Set the default umask.
  280. * mode= Set the default file permissions.
  281. * dmode= Set the default directory permissions.
  282. * uid= Set the default user.
  283. * bs= Set the block size.
  284. * unhide Show otherwise hidden files.
  285. * undelete Show deleted files in lists.
  286. * adinicb Embed data in the inode (default)
  287. * noadinicb Don't embed data in the inode
  288. * shortad Use short allocation descriptors
  289. * longad Use long allocation descriptors (default)
  290. * nostrict Unset strict conformance
  291. * iocharset= Set the NLS character set
  292. *
  293. * The remaining are for debugging and disaster recovery:
  294. *
  295. * novrs Skip volume sequence recognition
  296. *
  297. * The following expect an offset from 0.
  298. *
  299. * session= Set the CDROM session (default= last session)
  300. * anchor= Override standard anchor location. (default= 256)
  301. * volume= Override the VolumeDesc location. (unused)
  302. * partition= Override the PartitionDesc location. (unused)
  303. * lastblock= Set the last block of the filesystem.
  304. *
  305. * The following expect an offset from the partition root.
  306. *
  307. * fileset= Override the fileset block location. (unused)
  308. * rootdir= Override the root directory location. (unused)
  309. * WARNING: overriding the rootdir to a non-directory may
  310. * yield highly unpredictable results.
  311. *
  312. * PRE-CONDITIONS
  313. * options Pointer to mount options string.
  314. * uopts Pointer to mount options variable.
  315. *
  316. * POST-CONDITIONS
  317. * <return> 1 Mount options parsed okay.
  318. * <return> 0 Error parsing mount options.
  319. *
  320. * HISTORY
  321. * July 1, 1997 - Andrew E. Mileski
  322. * Written, tested, and released.
  323. */
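/*
 * Illustrative example only: a mount invocation exercising several of the
 * options documented above could look like
 *
 *   mount -t udf -o uid=500,gid=500,umask=0022,unhide,undelete /dev/sr0 /mnt
 *
 * in which case the string "uid=500,gid=500,umask=0022,unhide,undelete" is
 * what udf_parse_options() receives and tokenizes below.
 */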
  324. enum {
  325. Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
  326. Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
  327. Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
  328. Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
  329. Opt_rootdir, Opt_utf8, Opt_iocharset,
  330. Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
  331. Opt_fmode, Opt_dmode
  332. };
  333. static const match_table_t tokens = {
  334. {Opt_novrs, "novrs"},
  335. {Opt_nostrict, "nostrict"},
  336. {Opt_bs, "bs=%u"},
  337. {Opt_unhide, "unhide"},
  338. {Opt_undelete, "undelete"},
  339. {Opt_noadinicb, "noadinicb"},
  340. {Opt_adinicb, "adinicb"},
  341. {Opt_shortad, "shortad"},
  342. {Opt_longad, "longad"},
  343. {Opt_uforget, "uid=forget"},
  344. {Opt_uignore, "uid=ignore"},
  345. {Opt_gforget, "gid=forget"},
  346. {Opt_gignore, "gid=ignore"},
  347. {Opt_gid, "gid=%u"},
  348. {Opt_uid, "uid=%u"},
  349. {Opt_umask, "umask=%o"},
  350. {Opt_session, "session=%u"},
  351. {Opt_lastblock, "lastblock=%u"},
  352. {Opt_anchor, "anchor=%u"},
  353. {Opt_volume, "volume=%u"},
  354. {Opt_partition, "partition=%u"},
  355. {Opt_fileset, "fileset=%u"},
  356. {Opt_rootdir, "rootdir=%u"},
  357. {Opt_utf8, "utf8"},
  358. {Opt_iocharset, "iocharset=%s"},
  359. {Opt_fmode, "mode=%o"},
  360. {Opt_dmode, "dmode=%o"},
  361. {Opt_err, NULL}
  362. };
  363. static int udf_parse_options(char *options, struct udf_options *uopt,
  364. bool remount)
  365. {
  366. char *p;
  367. int option;
  368. uopt->novrs = 0;
  369. uopt->partition = 0xFFFF;
  370. uopt->session = 0xFFFFFFFF;
  371. uopt->lastblock = 0;
  372. uopt->anchor = 0;
  373. uopt->volume = 0xFFFFFFFF;
  374. uopt->rootdir = 0xFFFFFFFF;
  375. uopt->fileset = 0xFFFFFFFF;
  376. uopt->nls_map = NULL;
  377. if (!options)
  378. return 1;
  379. while ((p = strsep(&options, ",")) != NULL) {
  380. substring_t args[MAX_OPT_ARGS];
  381. int token;
  382. if (!*p)
  383. continue;
  384. token = match_token(p, tokens, args);
  385. switch (token) {
  386. case Opt_novrs:
  387. uopt->novrs = 1;
  388. break;
  389. case Opt_bs:
  390. if (match_int(&args[0], &option))
  391. return 0;
  392. uopt->blocksize = option;
  393. uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
  394. break;
  395. case Opt_unhide:
  396. uopt->flags |= (1 << UDF_FLAG_UNHIDE);
  397. break;
  398. case Opt_undelete:
  399. uopt->flags |= (1 << UDF_FLAG_UNDELETE);
  400. break;
  401. case Opt_noadinicb:
  402. uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
  403. break;
  404. case Opt_adinicb:
  405. uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
  406. break;
  407. case Opt_shortad:
  408. uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
  409. break;
  410. case Opt_longad:
  411. uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
  412. break;
  413. case Opt_gid:
  414. if (match_int(args, &option))
  415. return 0;
  416. uopt->gid = option;
  417. uopt->flags |= (1 << UDF_FLAG_GID_SET);
  418. break;
  419. case Opt_uid:
  420. if (match_int(args, &option))
  421. return 0;
  422. uopt->uid = option;
  423. uopt->flags |= (1 << UDF_FLAG_UID_SET);
  424. break;
  425. case Opt_umask:
  426. if (match_octal(args, &option))
  427. return 0;
  428. uopt->umask = option;
  429. break;
  430. case Opt_nostrict:
  431. uopt->flags &= ~(1 << UDF_FLAG_STRICT);
  432. break;
  433. case Opt_session:
  434. if (match_int(args, &option))
  435. return 0;
  436. uopt->session = option;
  437. if (!remount)
  438. uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
  439. break;
  440. case Opt_lastblock:
  441. if (match_int(args, &option))
  442. return 0;
  443. uopt->lastblock = option;
  444. if (!remount)
  445. uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
  446. break;
  447. case Opt_anchor:
  448. if (match_int(args, &option))
  449. return 0;
  450. uopt->anchor = option;
  451. break;
  452. case Opt_volume:
  453. if (match_int(args, &option))
  454. return 0;
  455. uopt->volume = option;
  456. break;
  457. case Opt_partition:
  458. if (match_int(args, &option))
  459. return 0;
  460. uopt->partition = option;
  461. break;
  462. case Opt_fileset:
  463. if (match_int(args, &option))
  464. return 0;
  465. uopt->fileset = option;
  466. break;
  467. case Opt_rootdir:
  468. if (match_int(args, &option))
  469. return 0;
  470. uopt->rootdir = option;
  471. break;
  472. case Opt_utf8:
  473. uopt->flags |= (1 << UDF_FLAG_UTF8);
  474. break;
  475. #ifdef CONFIG_UDF_NLS
  476. case Opt_iocharset:
  477. uopt->nls_map = load_nls(args[0].from);
  478. uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
  479. break;
  480. #endif
  481. case Opt_uignore:
  482. uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
  483. break;
  484. case Opt_uforget:
  485. uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
  486. break;
  487. case Opt_gignore:
  488. uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
  489. break;
  490. case Opt_gforget:
  491. uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
  492. break;
  493. case Opt_fmode:
  494. if (match_octal(args, &option))
  495. return 0;
  496. uopt->fmode = option & 0777;
  497. break;
  498. case Opt_dmode:
  499. if (match_octal(args, &option))
  500. return 0;
  501. uopt->dmode = option & 0777;
  502. break;
  503. default:
  504. pr_err("bad mount option \"%s\" or missing value\n", p);
  505. return 0;
  506. }
  507. }
  508. return 1;
  509. }
  510. static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
  511. {
  512. struct udf_options uopt;
  513. struct udf_sb_info *sbi = UDF_SB(sb);
  514. int error = 0;
  515. uopt.flags = sbi->s_flags;
  516. uopt.uid = sbi->s_uid;
  517. uopt.gid = sbi->s_gid;
  518. uopt.umask = sbi->s_umask;
  519. uopt.fmode = sbi->s_fmode;
  520. uopt.dmode = sbi->s_dmode;
  521. if (!udf_parse_options(options, &uopt, true))
  522. return -EINVAL;
  523. write_lock(&sbi->s_cred_lock);
  524. sbi->s_flags = uopt.flags;
  525. sbi->s_uid = uopt.uid;
  526. sbi->s_gid = uopt.gid;
  527. sbi->s_umask = uopt.umask;
  528. sbi->s_fmode = uopt.fmode;
  529. sbi->s_dmode = uopt.dmode;
  530. write_unlock(&sbi->s_cred_lock);
  531. if (sbi->s_lvid_bh) {
  532. int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
  533. if (write_rev > UDF_MAX_WRITE_VERSION)
  534. *flags |= MS_RDONLY;
  535. }
  536. if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
  537. goto out_unlock;
  538. if (*flags & MS_RDONLY)
  539. udf_close_lvid(sb);
  540. else
  541. udf_open_lvid(sb);
  542. out_unlock:
  543. return error;
  544. }
  545. /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
  546. /* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
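/*
 * The Volume Recognition Sequence starts 32768 bytes into the session.
 * udf_check_vsd() walks its Volume Structure Descriptors (CD001, BEA01,
 * NSR02, NSR03, TEA01) and returns the byte offset of the NSR descriptor it
 * found (NSR03 preferred over NSR02), -1 if nothing valid could be read at
 * the start of the sequence, or 0 if the sequence ended without an NSR
 * descriptor.
 */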
  547. static loff_t udf_check_vsd(struct super_block *sb)
  548. {
  549. struct volStructDesc *vsd = NULL;
  550. loff_t sector = 32768;
  551. int sectorsize;
  552. struct buffer_head *bh = NULL;
  553. int nsr02 = 0;
  554. int nsr03 = 0;
  555. struct udf_sb_info *sbi;
  556. sbi = UDF_SB(sb);
  557. if (sb->s_blocksize < sizeof(struct volStructDesc))
  558. sectorsize = sizeof(struct volStructDesc);
  559. else
  560. sectorsize = sb->s_blocksize;
  561. sector += (sbi->s_session << sb->s_blocksize_bits);
  562. udf_debug("Starting at sector %u (%ld byte sectors)\n",
  563. (unsigned int)(sector >> sb->s_blocksize_bits),
  564. sb->s_blocksize);
  565. /* Process the sequence (if applicable) */
  566. for (; !nsr02 && !nsr03; sector += sectorsize) {
  567. /* Read a block */
  568. bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
  569. if (!bh)
  570. break;
  571. /* Look for ISO descriptors */
  572. vsd = (struct volStructDesc *)(bh->b_data +
  573. (sector & (sb->s_blocksize - 1)));
  574. if (vsd->stdIdent[0] == 0) {
  575. brelse(bh);
  576. break;
  577. } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
  578. VSD_STD_ID_LEN)) {
  579. switch (vsd->structType) {
  580. case 0:
  581. udf_debug("ISO9660 Boot Record found\n");
  582. break;
  583. case 1:
  584. udf_debug("ISO9660 Primary Volume Descriptor found\n");
  585. break;
  586. case 2:
  587. udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
  588. break;
  589. case 3:
  590. udf_debug("ISO9660 Volume Partition Descriptor found\n");
  591. break;
  592. case 255:
  593. udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
  594. break;
  595. default:
  596. udf_debug("ISO9660 VRS (%u) found\n",
  597. vsd->structType);
  598. break;
  599. }
  600. } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
  601. VSD_STD_ID_LEN))
  602. ; /* nothing */
  603. else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
  604. VSD_STD_ID_LEN)) {
  605. brelse(bh);
  606. break;
  607. } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
  608. VSD_STD_ID_LEN))
  609. nsr02 = sector;
  610. else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
  611. VSD_STD_ID_LEN))
  612. nsr03 = sector;
  613. brelse(bh);
  614. }
  615. if (nsr03)
  616. return nsr03;
  617. else if (nsr02)
  618. return nsr02;
  619. else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768)
  620. return -1;
  621. else
  622. return 0;
  623. }
  624. static int udf_find_fileset(struct super_block *sb,
  625. struct kernel_lb_addr *fileset,
  626. struct kernel_lb_addr *root)
  627. {
  628. struct buffer_head *bh = NULL;
  629. long lastblock;
  630. uint16_t ident;
  631. struct udf_sb_info *sbi;
  632. if (fileset->logicalBlockNum != 0xFFFFFFFF ||
  633. fileset->partitionReferenceNum != 0xFFFF) {
  634. bh = udf_read_ptagged(sb, fileset, 0, &ident);
  635. if (!bh) {
  636. return 1;
  637. } else if (ident != TAG_IDENT_FSD) {
  638. brelse(bh);
  639. return 1;
  640. }
  641. }
  642. sbi = UDF_SB(sb);
  643. if (!bh) {
  644. /* Search backwards through the partitions */
  645. struct kernel_lb_addr newfileset;
  646. /* --> cvg: FIXME - is it reasonable? */
  647. return 1;
  648. for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
  649. (newfileset.partitionReferenceNum != 0xFFFF &&
  650. fileset->logicalBlockNum == 0xFFFFFFFF &&
  651. fileset->partitionReferenceNum == 0xFFFF);
  652. newfileset.partitionReferenceNum--) {
  653. lastblock = sbi->s_partmaps
  654. [newfileset.partitionReferenceNum]
  655. .s_partition_len;
  656. newfileset.logicalBlockNum = 0;
  657. do {
  658. bh = udf_read_ptagged(sb, &newfileset, 0,
  659. &ident);
  660. if (!bh) {
  661. newfileset.logicalBlockNum++;
  662. continue;
  663. }
  664. switch (ident) {
  665. case TAG_IDENT_SBD:
  666. {
  667. struct spaceBitmapDesc *sp;
  668. sp = (struct spaceBitmapDesc *)
  669. bh->b_data;
  670. newfileset.logicalBlockNum += 1 +
  671. ((le32_to_cpu(sp->numOfBytes) +
  672. sizeof(struct spaceBitmapDesc)
  673. - 1) >> sb->s_blocksize_bits);
  674. brelse(bh);
  675. break;
  676. }
  677. case TAG_IDENT_FSD:
  678. *fileset = newfileset;
  679. break;
  680. default:
  681. newfileset.logicalBlockNum++;
  682. brelse(bh);
  683. bh = NULL;
  684. break;
  685. }
  686. } while (newfileset.logicalBlockNum < lastblock &&
  687. fileset->logicalBlockNum == 0xFFFFFFFF &&
  688. fileset->partitionReferenceNum == 0xFFFF);
  689. }
  690. }
  691. if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
  692. fileset->partitionReferenceNum != 0xFFFF) && bh) {
  693. udf_debug("Fileset at block=%d, partition=%d\n",
  694. fileset->logicalBlockNum,
  695. fileset->partitionReferenceNum);
  696. sbi->s_partition = fileset->partitionReferenceNum;
  697. udf_load_fileset(sb, bh, root);
  698. brelse(bh);
  699. return 0;
  700. }
  701. return 1;
  702. }
  703. static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
  704. {
  705. struct primaryVolDesc *pvoldesc;
  706. struct ustr *instr, *outstr;
  707. struct buffer_head *bh;
  708. uint16_t ident;
  709. int ret = 1;
  710. instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
  711. if (!instr)
  712. return 1;
  713. outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
  714. if (!outstr)
  715. goto out1;
  716. bh = udf_read_tagged(sb, block, block, &ident);
  717. if (!bh)
  718. goto out2;
  719. BUG_ON(ident != TAG_IDENT_PVD);
  720. pvoldesc = (struct primaryVolDesc *)bh->b_data;
  721. if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
  722. pvoldesc->recordingDateAndTime)) {
  723. #ifdef UDFFS_DEBUG
  724. struct timestamp *ts = &pvoldesc->recordingDateAndTime;
  725. udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
  726. le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
  727. ts->minute, le16_to_cpu(ts->typeAndTimezone));
  728. #endif
  729. }
  730. if (!udf_build_ustr(instr, pvoldesc->volIdent, 32))
  731. if (udf_CS0toUTF8(outstr, instr)) {
  732. strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
  733. outstr->u_len > 31 ? 31 : outstr->u_len);
  734. udf_debug("volIdent[] = '%s'\n",
  735. UDF_SB(sb)->s_volume_ident);
  736. }
  737. if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128))
  738. if (udf_CS0toUTF8(outstr, instr))
  739. udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
  740. brelse(bh);
  741. ret = 0;
  742. out2:
  743. kfree(outstr);
  744. out1:
  745. kfree(instr);
  746. return ret;
  747. }
  748. struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
  749. u32 meta_file_loc, u32 partition_num)
  750. {
  751. struct kernel_lb_addr addr;
  752. struct inode *metadata_fe;
  753. addr.logicalBlockNum = meta_file_loc;
  754. addr.partitionReferenceNum = partition_num;
  755. metadata_fe = udf_iget(sb, &addr);
  756. if (metadata_fe == NULL)
  757. udf_warn(sb, "metadata inode efe not found\n");
  758. else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
  759. udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
  760. iput(metadata_fe);
  761. metadata_fe = NULL;
  762. }
  763. return metadata_fe;
  764. }
  765. static int udf_load_metadata_files(struct super_block *sb, int partition)
  766. {
  767. struct udf_sb_info *sbi = UDF_SB(sb);
  768. struct udf_part_map *map;
  769. struct udf_meta_data *mdata;
  770. struct kernel_lb_addr addr;
  771. map = &sbi->s_partmaps[partition];
  772. mdata = &map->s_type_specific.s_metadata;
  773. /* metadata address */
  774. udf_debug("Metadata file location: block = %d part = %d\n",
  775. mdata->s_meta_file_loc, map->s_partition_num);
  776. mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
  777. mdata->s_meta_file_loc, map->s_partition_num);
  778. if (mdata->s_metadata_fe == NULL) {
  779. /* mirror file entry */
  780. udf_debug("Mirror metadata file location: block = %d part = %d\n",
  781. mdata->s_mirror_file_loc, map->s_partition_num);
  782. mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
  783. mdata->s_mirror_file_loc, map->s_partition_num);
  784. if (mdata->s_mirror_fe == NULL) {
  785. udf_err(sb, "Both metadata and mirror metadata inode efe could not be found\n");
  786. goto error_exit;
  787. }
  788. }
  789. /*
  790. * bitmap file entry
  791. * Note:
  792. * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
  793. */
  794. if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
  795. addr.logicalBlockNum = mdata->s_bitmap_file_loc;
  796. addr.partitionReferenceNum = map->s_partition_num;
  797. udf_debug("Bitmap file location: block = %d part = %d\n",
  798. addr.logicalBlockNum, addr.partitionReferenceNum);
  799. mdata->s_bitmap_fe = udf_iget(sb, &addr);
  800. if (mdata->s_bitmap_fe == NULL) {
  801. if (sb->s_flags & MS_RDONLY)
  802. udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
  803. else {
  804. udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
  805. goto error_exit;
  806. }
  807. }
  808. }
  809. udf_debug("udf_load_metadata_files Ok\n");
  810. return 0;
  811. error_exit:
  812. return 1;
  813. }
  814. static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
  815. struct kernel_lb_addr *root)
  816. {
  817. struct fileSetDesc *fset;
  818. fset = (struct fileSetDesc *)bh->b_data;
  819. *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
  820. UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
  821. udf_debug("Rootdir at block=%d, partition=%d\n",
  822. root->logicalBlockNum, root->partitionReferenceNum);
  823. }
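/*
 * The free-space bitmap is handled in groups of one filesystem block each,
 * so a group covers sb->s_blocksize * 8 bits.  The partition length (in
 * blocks, i.e. in bits of bitmap) plus the on-disk spaceBitmapDesc header,
 * expressed in bits, is rounded up to whole groups.  For example, with
 * 2048-byte blocks each group covers 16384 bits of the bitmap.
 */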
  824. int udf_compute_nr_groups(struct super_block *sb, u32 partition)
  825. {
  826. struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
  827. return DIV_ROUND_UP(map->s_partition_len +
  828. (sizeof(struct spaceBitmapDesc) << 3),
  829. sb->s_blocksize * 8);
  830. }
  831. static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
  832. {
  833. struct udf_bitmap *bitmap;
  834. int nr_groups;
  835. int size;
  836. nr_groups = udf_compute_nr_groups(sb, index);
  837. size = sizeof(struct udf_bitmap) +
  838. (sizeof(struct buffer_head *) * nr_groups);
  839. if (size <= PAGE_SIZE)
  840. bitmap = kzalloc(size, GFP_KERNEL);
  841. else
  842. bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
  843. if (bitmap == NULL) {
  844. udf_err(sb, "Unable to allocate space for bitmap and %d buffer_head pointers\n",
  845. nr_groups);
  846. return NULL;
  847. }
  848. bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
  849. bitmap->s_nr_groups = nr_groups;
  850. return bitmap;
  851. }
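/*
 * Fill the in-core partition map from an on-disk Partition Descriptor:
 * record the partition start and length, translate the access type into
 * UDF_PART_FLAG_* bits and, for NSR02/NSR03 partition contents, load the
 * unallocated/freed space tables and bitmaps advertised by the embedded
 * partitionHeaderDesc.
 */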
  852. static int udf_fill_partdesc_info(struct super_block *sb,
  853. struct partitionDesc *p, int p_index)
  854. {
  855. struct udf_part_map *map;
  856. struct udf_sb_info *sbi = UDF_SB(sb);
  857. struct partitionHeaderDesc *phd;
  858. map = &sbi->s_partmaps[p_index];
  859. map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
  860. map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
  861. if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
  862. map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
  863. if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
  864. map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
  865. if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
  866. map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
  867. if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
  868. map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
  869. udf_debug("Partition (%d type %x) starts at physical %d, block length %d\n",
  870. p_index, map->s_partition_type,
  871. map->s_partition_root, map->s_partition_len);
  872. if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
  873. strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
  874. return 0;
  875. phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
  876. if (phd->unallocSpaceTable.extLength) {
  877. struct kernel_lb_addr loc = {
  878. .logicalBlockNum = le32_to_cpu(
  879. phd->unallocSpaceTable.extPosition),
  880. .partitionReferenceNum = p_index,
  881. };
  882. map->s_uspace.s_table = udf_iget(sb, &loc);
  883. if (!map->s_uspace.s_table) {
  884. udf_debug("cannot load unallocSpaceTable (part %d)\n",
  885. p_index);
  886. return 1;
  887. }
  888. map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
  889. udf_debug("unallocSpaceTable (part %d) @ %ld\n",
  890. p_index, map->s_uspace.s_table->i_ino);
  891. }
  892. if (phd->unallocSpaceBitmap.extLength) {
  893. struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
  894. if (!bitmap)
  895. return 1;
  896. map->s_uspace.s_bitmap = bitmap;
  897. bitmap->s_extLength = le32_to_cpu(
  898. phd->unallocSpaceBitmap.extLength);
  899. bitmap->s_extPosition = le32_to_cpu(
  900. phd->unallocSpaceBitmap.extPosition);
  901. map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
  902. udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
  903. p_index, bitmap->s_extPosition);
  904. }
  905. if (phd->partitionIntegrityTable.extLength)
  906. udf_debug("partitionIntegrityTable (part %d)\n", p_index);
  907. if (phd->freedSpaceTable.extLength) {
  908. struct kernel_lb_addr loc = {
  909. .logicalBlockNum = le32_to_cpu(
  910. phd->freedSpaceTable.extPosition),
  911. .partitionReferenceNum = p_index,
  912. };
  913. map->s_fspace.s_table = udf_iget(sb, &loc);
  914. if (!map->s_fspace.s_table) {
  915. udf_debug("cannot load freedSpaceTable (part %d)\n",
  916. p_index);
  917. return 1;
  918. }
  919. map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
  920. udf_debug("freedSpaceTable (part %d) @ %ld\n",
  921. p_index, map->s_fspace.s_table->i_ino);
  922. }
  923. if (phd->freedSpaceBitmap.extLength) {
  924. struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
  925. if (!bitmap)
  926. return 1;
  927. map->s_fspace.s_bitmap = bitmap;
  928. bitmap->s_extLength = le32_to_cpu(
  929. phd->freedSpaceBitmap.extLength);
  930. bitmap->s_extPosition = le32_to_cpu(
  931. phd->freedSpaceBitmap.extPosition);
  932. map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
  933. udf_debug("freedSpaceBitmap (part %d) @ %d\n",
  934. p_index, bitmap->s_extPosition);
  935. }
  936. return 0;
  937. }
  938. static void udf_find_vat_block(struct super_block *sb, int p_index,
  939. int type1_index, sector_t start_block)
  940. {
  941. struct udf_sb_info *sbi = UDF_SB(sb);
  942. struct udf_part_map *map = &sbi->s_partmaps[p_index];
  943. sector_t vat_block;
  944. struct kernel_lb_addr ino;
  945. /*
  946. * VAT file entry is in the last recorded block. Some broken disks have
  947. * it a few blocks before so try a bit harder...
  948. */
  949. ino.partitionReferenceNum = type1_index;
  950. for (vat_block = start_block;
  951. vat_block >= map->s_partition_root &&
  952. vat_block >= start_block - 3 &&
  953. !sbi->s_vat_inode; vat_block--) {
  954. ino.logicalBlockNum = vat_block - map->s_partition_root;
  955. sbi->s_vat_inode = udf_iget(sb, &ino);
  956. }
  957. }
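/*
 * Load the Virtual Allocation Table of a virtual partition.  For UDF 1.50
 * (UDF_VIRTUAL_MAP15) the VAT is a bare array of 32-bit entries followed by
 * a 36-byte identifier trailer, hence the "(i_size - 36) >> 2" entry count.
 * For UDF 2.0x (UDF_VIRTUAL_MAP20) the table starts with a
 * virtualAllocationTable20 header whose lengthHeader field gives the offset
 * of the first entry.
 */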
  958. static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
  959. {
  960. struct udf_sb_info *sbi = UDF_SB(sb);
  961. struct udf_part_map *map = &sbi->s_partmaps[p_index];
  962. struct buffer_head *bh = NULL;
  963. struct udf_inode_info *vati;
  964. uint32_t pos;
  965. struct virtualAllocationTable20 *vat20;
  966. sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
  967. udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
  968. if (!sbi->s_vat_inode &&
  969. sbi->s_last_block != blocks - 1) {
  970. pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
  971. (unsigned long)sbi->s_last_block,
  972. (unsigned long)blocks - 1);
  973. udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
  974. }
  975. if (!sbi->s_vat_inode)
  976. return 1;
  977. if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
  978. map->s_type_specific.s_virtual.s_start_offset = 0;
  979. map->s_type_specific.s_virtual.s_num_entries =
  980. (sbi->s_vat_inode->i_size - 36) >> 2;
  981. } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
  982. vati = UDF_I(sbi->s_vat_inode);
  983. if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
  984. pos = udf_block_map(sbi->s_vat_inode, 0);
  985. bh = sb_bread(sb, pos);
  986. if (!bh)
  987. return 1;
  988. vat20 = (struct virtualAllocationTable20 *)bh->b_data;
  989. } else {
  990. vat20 = (struct virtualAllocationTable20 *)
  991. vati->i_ext.i_data;
  992. }
  993. map->s_type_specific.s_virtual.s_start_offset =
  994. le16_to_cpu(vat20->lengthHeader);
  995. map->s_type_specific.s_virtual.s_num_entries =
  996. (sbi->s_vat_inode->i_size -
  997. map->s_type_specific.s_virtual.
  998. s_start_offset) >> 2;
  999. brelse(bh);
  1000. }
  1001. return 0;
  1002. }
  1003. static int udf_load_partdesc(struct super_block *sb, sector_t block)
  1004. {
  1005. struct buffer_head *bh;
  1006. struct partitionDesc *p;
  1007. struct udf_part_map *map;
  1008. struct udf_sb_info *sbi = UDF_SB(sb);
  1009. int i, type1_idx;
  1010. uint16_t partitionNumber;
  1011. uint16_t ident;
  1012. int ret = 0;
  1013. bh = udf_read_tagged(sb, block, block, &ident);
  1014. if (!bh)
  1015. return 1;
  1016. if (ident != TAG_IDENT_PD)
  1017. goto out_bh;
  1018. p = (struct partitionDesc *)bh->b_data;
  1019. partitionNumber = le16_to_cpu(p->partitionNumber);
  1020. /* First scan for TYPE1, SPARABLE and METADATA partitions */
  1021. for (i = 0; i < sbi->s_partitions; i++) {
  1022. map = &sbi->s_partmaps[i];
  1023. udf_debug("Searching map: (%d == %d)\n",
  1024. map->s_partition_num, partitionNumber);
  1025. if (map->s_partition_num == partitionNumber &&
  1026. (map->s_partition_type == UDF_TYPE1_MAP15 ||
  1027. map->s_partition_type == UDF_SPARABLE_MAP15))
  1028. break;
  1029. }
  1030. if (i >= sbi->s_partitions) {
  1031. udf_debug("Partition (%d) not found in partition map\n",
  1032. partitionNumber);
  1033. goto out_bh;
  1034. }
  1035. ret = udf_fill_partdesc_info(sb, p, i);
  1036. /*
  1037. * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
  1038. * PHYSICAL partitions are already set up
  1039. */
  1040. type1_idx = i;
  1041. for (i = 0; i < sbi->s_partitions; i++) {
  1042. map = &sbi->s_partmaps[i];
  1043. if (map->s_partition_num == partitionNumber &&
  1044. (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
  1045. map->s_partition_type == UDF_VIRTUAL_MAP20 ||
  1046. map->s_partition_type == UDF_METADATA_MAP25))
  1047. break;
  1048. }
  1049. if (i >= sbi->s_partitions)
  1050. goto out_bh;
  1051. ret = udf_fill_partdesc_info(sb, p, i);
  1052. if (ret)
  1053. goto out_bh;
  1054. if (map->s_partition_type == UDF_METADATA_MAP25) {
  1055. ret = udf_load_metadata_files(sb, i);
  1056. if (ret) {
  1057. udf_err(sb, "error loading MetaData partition map %d\n",
  1058. i);
  1059. goto out_bh;
  1060. }
  1061. } else {
  1062. ret = udf_load_vat(sb, i, type1_idx);
  1063. if (ret)
  1064. goto out_bh;
  1065. /*
  1066. * Mark filesystem read-only if we have a partition with
  1067. * virtual map since we don't handle writing to it (we
  1068. * overwrite blocks instead of relocating them).
  1069. */
  1070. sb->s_flags |= MS_RDONLY;
  1071. pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n");
  1072. }
  1073. out_bh:
  1074. /* In case loading failed, we handle cleanup in udf_fill_super */
  1075. brelse(bh);
  1076. return ret;
  1077. }
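/*
 * Parse the Logical Volume Descriptor: allocate one udf_part_map per
 * partition map in the LVD, then classify each map as plain type 1, virtual
 * (UDF 1.50/2.0x), sparable or metadata based on the type 2 partition map
 * identifier, reading sparing tables as they are encountered.  The fileset
 * location is taken from logicalVolContentsUse.
 */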
  1078. static int udf_load_logicalvol(struct super_block *sb, sector_t block,
  1079. struct kernel_lb_addr *fileset)
  1080. {
  1081. struct logicalVolDesc *lvd;
  1082. int i, j, offset;
  1083. uint8_t type;
  1084. struct udf_sb_info *sbi = UDF_SB(sb);
  1085. struct genericPartitionMap *gpm;
  1086. uint16_t ident;
  1087. struct buffer_head *bh;
  1088. int ret = 0;
  1089. bh = udf_read_tagged(sb, block, block, &ident);
  1090. if (!bh)
  1091. return 1;
  1092. BUG_ON(ident != TAG_IDENT_LVD);
  1093. lvd = (struct logicalVolDesc *)bh->b_data;
  1094. i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
  1095. if (i != 0) {
  1096. ret = i;
  1097. goto out_bh;
  1098. }
  1099. for (i = 0, offset = 0;
  1100. i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
  1101. i++, offset += gpm->partitionMapLength) {
  1102. struct udf_part_map *map = &sbi->s_partmaps[i];
  1103. gpm = (struct genericPartitionMap *)
  1104. &(lvd->partitionMaps[offset]);
  1105. type = gpm->partitionMapType;
  1106. if (type == 1) {
  1107. struct genericPartitionMap1 *gpm1 =
  1108. (struct genericPartitionMap1 *)gpm;
  1109. map->s_partition_type = UDF_TYPE1_MAP15;
  1110. map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
  1111. map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
  1112. map->s_partition_func = NULL;
  1113. } else if (type == 2) {
  1114. struct udfPartitionMap2 *upm2 =
  1115. (struct udfPartitionMap2 *)gpm;
  1116. if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
  1117. strlen(UDF_ID_VIRTUAL))) {
  1118. u16 suf =
  1119. le16_to_cpu(((__le16 *)upm2->partIdent.
  1120. identSuffix)[0]);
  1121. if (suf < 0x0200) {
  1122. map->s_partition_type =
  1123. UDF_VIRTUAL_MAP15;
  1124. map->s_partition_func =
  1125. udf_get_pblock_virt15;
  1126. } else {
  1127. map->s_partition_type =
  1128. UDF_VIRTUAL_MAP20;
  1129. map->s_partition_func =
  1130. udf_get_pblock_virt20;
  1131. }
  1132. } else if (!strncmp(upm2->partIdent.ident,
  1133. UDF_ID_SPARABLE,
  1134. strlen(UDF_ID_SPARABLE))) {
  1135. uint32_t loc;
  1136. struct sparingTable *st;
  1137. struct sparablePartitionMap *spm =
  1138. (struct sparablePartitionMap *)gpm;
  1139. map->s_partition_type = UDF_SPARABLE_MAP15;
  1140. map->s_type_specific.s_sparing.s_packet_len =
  1141. le16_to_cpu(spm->packetLength);
  1142. for (j = 0; j < spm->numSparingTables; j++) {
  1143. struct buffer_head *bh2;
  1144. loc = le32_to_cpu(
  1145. spm->locSparingTable[j]);
  1146. bh2 = udf_read_tagged(sb, loc, loc,
  1147. &ident);
  1148. map->s_type_specific.s_sparing.
  1149. s_spar_map[j] = bh2;
  1150. if (bh2 == NULL)
  1151. continue;
  1152. st = (struct sparingTable *)bh2->b_data;
  1153. if (ident != 0 || strncmp(
  1154. st->sparingIdent.ident,
  1155. UDF_ID_SPARING,
  1156. strlen(UDF_ID_SPARING))) {
  1157. brelse(bh2);
  1158. map->s_type_specific.s_sparing.
  1159. s_spar_map[j] = NULL;
  1160. }
  1161. }
  1162. map->s_partition_func = udf_get_pblock_spar15;
  1163. } else if (!strncmp(upm2->partIdent.ident,
  1164. UDF_ID_METADATA,
  1165. strlen(UDF_ID_METADATA))) {
  1166. struct udf_meta_data *mdata =
  1167. &map->s_type_specific.s_metadata;
  1168. struct metadataPartitionMap *mdm =
  1169. (struct metadataPartitionMap *)
  1170. &(lvd->partitionMaps[offset]);
  1171. udf_debug("Parsing Logical vol part %d type %d id=%s\n",
  1172. i, type, UDF_ID_METADATA);
  1173. map->s_partition_type = UDF_METADATA_MAP25;
  1174. map->s_partition_func = udf_get_pblock_meta25;
  1175. mdata->s_meta_file_loc =
  1176. le32_to_cpu(mdm->metadataFileLoc);
  1177. mdata->s_mirror_file_loc =
  1178. le32_to_cpu(mdm->metadataMirrorFileLoc);
  1179. mdata->s_bitmap_file_loc =
  1180. le32_to_cpu(mdm->metadataBitmapFileLoc);
  1181. mdata->s_alloc_unit_size =
  1182. le32_to_cpu(mdm->allocUnitSize);
  1183. mdata->s_align_unit_size =
  1184. le16_to_cpu(mdm->alignUnitSize);
  1185. if (mdm->flags & 0x01)
  1186. mdata->s_flags |= MF_DUPLICATE_MD;
  1187. udf_debug("Metadata Ident suffix=0x%x\n",
  1188. le16_to_cpu(*(__le16 *)
  1189. mdm->partIdent.identSuffix));
  1190. udf_debug("Metadata part num=%d\n",
  1191. le16_to_cpu(mdm->partitionNum));
  1192. udf_debug("Metadata part alloc unit size=%d\n",
  1193. le32_to_cpu(mdm->allocUnitSize));
  1194. udf_debug("Metadata file loc=%d\n",
  1195. le32_to_cpu(mdm->metadataFileLoc));
  1196. udf_debug("Mirror file loc=%d\n",
  1197. le32_to_cpu(mdm->metadataMirrorFileLoc));
  1198. udf_debug("Bitmap file loc=%d\n",
  1199. le32_to_cpu(mdm->metadataBitmapFileLoc));
  1200. udf_debug("Flags: %d %d\n",
  1201. mdata->s_flags, mdm->flags);
  1202. } else {
  1203. udf_debug("Unknown ident: %s\n",
  1204. upm2->partIdent.ident);
  1205. continue;
  1206. }
  1207. map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
  1208. map->s_partition_num = le16_to_cpu(upm2->partitionNum);
  1209. }
  1210. udf_debug("Partition (%d:%d) type %d on volume %d\n",
  1211. i, map->s_partition_num, type, map->s_volumeseqnum);
  1212. }
  1213. if (fileset) {
  1214. struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
  1215. *fileset = lelb_to_cpu(la->extLocation);
  1216. udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
  1217. fileset->logicalBlockNum,
  1218. fileset->partitionReferenceNum);
  1219. }
  1220. if (lvd->integritySeqExt.extLength)
  1221. udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
  1222. out_bh:
  1223. brelse(bh);
  1224. return ret;
  1225. }
  1226. /*
  1227. * udf_load_logicalvolint
  1228. * Walk the integrity descriptor sequence and keep the buffer of the last Logical Volume Integrity Descriptor found in sbi->s_lvid_bh.
  1229. */
  1230. static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
  1231. {
  1232. struct buffer_head *bh = NULL;
  1233. uint16_t ident;
  1234. struct udf_sb_info *sbi = UDF_SB(sb);
  1235. struct logicalVolIntegrityDesc *lvid;
  1236. while (loc.extLength > 0 &&
  1237. (bh = udf_read_tagged(sb, loc.extLocation,
  1238. loc.extLocation, &ident)) &&
  1239. ident == TAG_IDENT_LVID) {
  1240. sbi->s_lvid_bh = bh;
  1241. lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
  1242. if (lvid->nextIntegrityExt.extLength)
  1243. udf_load_logicalvolint(sb,
  1244. leea_to_cpu(lvid->nextIntegrityExt));
  1245. if (sbi->s_lvid_bh != bh)
  1246. brelse(bh);
  1247. loc.extLength -= sb->s_blocksize;
  1248. loc.extLocation++;
  1249. }
  1250. if (sbi->s_lvid_bh != bh)
  1251. brelse(bh);
  1252. }
  1253. /*
  1254. * udf_process_sequence
  1255. *
  1256. * PURPOSE
  1257. * Process a main/reserve volume descriptor sequence.
  1258. *
  1259. * PRE-CONDITIONS
  1260. * sb Pointer to _locked_ superblock.
  1261. * block First block of first extent of the sequence.
  1262. * lastblock Lastblock of first extent of the sequence.
  1263. *
  1264. * HISTORY
  1265. * July 1, 1997 - Andrew E. Mileski
  1266. * Written, tested, and released.
  1267. */
static noinline int udf_process_sequence(struct super_block *sb, long block,
				long lastblock, struct kernel_lb_addr *fileset)
{
	struct buffer_head *bh = NULL;
	struct udf_vds_record vds[VDS_POS_LENGTH];
	struct udf_vds_record *curr;
	struct generic_desc *gd;
	struct volDescPtr *vdp;
	int done = 0;
	uint32_t vdsn;
	uint16_t ident;
	long next_s = 0, next_e = 0;

	memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);

	/*
	 * Read the main descriptor sequence and find which descriptors
	 * are in it.
	 */
	for (; (!done && block <= lastblock); block++) {
		bh = udf_read_tagged(sb, block, block, &ident);
		if (!bh) {
			udf_err(sb,
				"Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
				(unsigned long long)block);
			return 1;
		}

		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
		gd = (struct generic_desc *)bh->b_data;
		vdsn = le32_to_cpu(gd->volDescSeqNum);
		switch (ident) {
		case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
			curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
			if (vdsn >= curr->volDescSeqNum) {
				curr->volDescSeqNum = vdsn;
				curr->block = block;
			}
			break;
		case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
			curr = &vds[VDS_POS_VOL_DESC_PTR];
			if (vdsn >= curr->volDescSeqNum) {
				curr->volDescSeqNum = vdsn;
				curr->block = block;

				vdp = (struct volDescPtr *)bh->b_data;
				next_s = le32_to_cpu(
					vdp->nextVolDescSeqExt.extLocation);
				next_e = le32_to_cpu(
					vdp->nextVolDescSeqExt.extLength);
				next_e = next_e >> sb->s_blocksize_bits;
				next_e += next_s;
			}
			break;
		case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
			curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
			if (vdsn >= curr->volDescSeqNum) {
				curr->volDescSeqNum = vdsn;
				curr->block = block;
			}
			break;
		case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
			curr = &vds[VDS_POS_PARTITION_DESC];
			if (!curr->block)
				curr->block = block;
			break;
		case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
			curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
			if (vdsn >= curr->volDescSeqNum) {
				curr->volDescSeqNum = vdsn;
				curr->block = block;
			}
			break;
		case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
			curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
			if (vdsn >= curr->volDescSeqNum) {
				curr->volDescSeqNum = vdsn;
				curr->block = block;
			}
			break;
		case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
			vds[VDS_POS_TERMINATING_DESC].block = block;
			if (next_e) {
				block = next_s;
				lastblock = next_e;
				next_s = next_e = 0;
			} else
				done = 1;
			break;
		}
		brelse(bh);
	}

	/*
	 * Now read interesting descriptors again and process them
	 * in a suitable order
	 */
	if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
		udf_err(sb, "Primary Volume Descriptor not found!\n");
		return 1;
	}
	if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
		return 1;

	if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
	    vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
		return 1;

	if (vds[VDS_POS_PARTITION_DESC].block) {
		/*
		 * We rescan the whole descriptor sequence to find
		 * partition descriptor blocks and process them.
		 */
		for (block = vds[VDS_POS_PARTITION_DESC].block;
		     block < vds[VDS_POS_TERMINATING_DESC].block;
		     block++)
			if (udf_load_partdesc(sb, block))
				return 1;
	}

	return 0;
}
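
/*
 * Load the Volume Descriptor Sequence described by the given anchor block:
 * the main extent is processed first, and the reserve extent is used only
 * as a fallback. Returns 1 on success, 0 if neither sequence was usable.
 */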
static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
			     struct kernel_lb_addr *fileset)
{
	struct anchorVolDescPtr *anchor;
	long main_s, main_e, reserve_s, reserve_e;

	anchor = (struct anchorVolDescPtr *)bh->b_data;

	/* Locate the main sequence */
	main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
	main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
	main_e = main_e >> sb->s_blocksize_bits;
	main_e += main_s;

	/* Locate the reserve sequence */
	reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
	reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
	reserve_e = reserve_e >> sb->s_blocksize_bits;
	reserve_e += reserve_s;

	/* Process the main & reserve sequences */
	/* responsible for finding the PartitionDesc(s) */
	if (!udf_process_sequence(sb, main_s, main_e, fileset))
		return 1;
	return !udf_process_sequence(sb, reserve_s, reserve_e, fileset);
}

/*
 * Check whether there is an anchor block in the given block and
 * load Volume Descriptor Sequence if so.
 */
static int udf_check_anchor_block(struct super_block *sb, sector_t block,
				  struct kernel_lb_addr *fileset)
{
	struct buffer_head *bh;
	uint16_t ident;
	int ret;

	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
	    udf_fixed_to_variable(block) >=
	    sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
		return 0;

	bh = udf_read_tagged(sb, block, block, &ident);
	if (!bh)
		return 0;
	if (ident != TAG_IDENT_AVDP) {
		brelse(bh);
		return 0;
	}
	ret = udf_load_sequence(sb, bh, fileset);
	brelse(bh);
	return ret;
}

/* Search for an anchor volume descriptor pointer */
static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
				 struct kernel_lb_addr *fileset)
{
	sector_t last[6];
	int i;
	struct udf_sb_info *sbi = UDF_SB(sb);
	int last_count = 0;

	/* First try user provided anchor */
	if (sbi->s_anchor) {
		if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
			return lastblock;
	}
	/*
	 * according to spec, anchor is in either:
	 *     block 256
	 *     lastblock-256
	 *     lastblock
	 * however, if the disc isn't closed, it could be 512.
	 */
	if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
		return lastblock;
	/*
	 * The trouble is which block is the last one. Drives often misreport
	 * this so we try various possibilities.
	 */
	last[last_count++] = lastblock;
	if (lastblock >= 1)
		last[last_count++] = lastblock - 1;
	last[last_count++] = lastblock + 1;
	if (lastblock >= 2)
		last[last_count++] = lastblock - 2;
	if (lastblock >= 150)
		last[last_count++] = lastblock - 150;
	if (lastblock >= 152)
		last[last_count++] = lastblock - 152;

	for (i = 0; i < last_count; i++) {
		if (last[i] >= sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits)
			continue;
		if (udf_check_anchor_block(sb, last[i], fileset))
			return last[i];
		if (last[i] < 256)
			continue;
		if (udf_check_anchor_block(sb, last[i] - 256, fileset))
			return last[i];
	}

	/* Finally try block 512 in case media is open */
	if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
		return last[0];
	return 0;
}

/*
 * Find an anchor volume descriptor and load Volume Descriptor Sequence from
 * area specified by it. The function expects sbi->s_last_block to be the last
 * block on the media.
 *
 * Return 1 if ok, 0 if not found.
 */
static int udf_find_anchor(struct super_block *sb,
			   struct kernel_lb_addr *fileset)
{
	sector_t lastblock;
	struct udf_sb_info *sbi = UDF_SB(sb);

	lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
	if (lastblock)
		goto out;

	/* No anchor found? Try VARCONV conversion of block numbers */
	UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
	/* First, try without converting the number of the last block */
	lastblock = udf_scan_anchors(sb,
				     udf_variable_to_fixed(sbi->s_last_block),
				     fileset);
	if (lastblock)
		goto out;

	/* Then try with the converted number of the last block */
	lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
	if (!lastblock) {
		/* VARCONV didn't help. Clear it. */
		UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
		return 0;
	}
out:
	sbi->s_last_block = lastblock;
	return 1;
}

/*
 * Check Volume Structure Descriptor, find Anchor block and load Volume
 * Descriptor Sequence
 */
static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
			int silent, struct kernel_lb_addr *fileset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	loff_t nsr_off;

	if (!sb_set_blocksize(sb, uopt->blocksize)) {
		if (!silent)
			udf_warn(sb, "Bad block size\n");
		return 0;
	}
	sbi->s_last_block = uopt->lastblock;
	if (!uopt->novrs) {
		/* Check that it is NSR02 compliant */
		nsr_off = udf_check_vsd(sb);
		if (!nsr_off) {
			if (!silent)
				udf_warn(sb, "No VRS found\n");
			return 0;
		}
		if (nsr_off == -1)
			udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
		if (!sbi->s_last_block)
			sbi->s_last_block = udf_get_last_block(sb);
	} else {
		udf_debug("Validity check skipped because of novrs option\n");
	}

	/* Look for anchor block and load Volume Descriptor Sequence */
	sbi->s_anchor = uopt->anchor;
	if (!udf_find_anchor(sb, fileset)) {
		if (!silent)
			udf_warn(sb, "No anchor found\n");
		return 0;
	}
	return 1;
}
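
/*
 * Mark the Logical Volume Integrity Descriptor as "open" (volume in use),
 * stamp it with the current time and this implementation's identifier,
 * recompute the descriptor CRC and checksum, and schedule the buffer for
 * writeout.
 */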
static void udf_open_lvid(struct super_block *sb)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = sbi->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolIntegrityDescImpUse *lvidiu;

	if (!bh)
		return;

	mutex_lock(&sbi->s_alloc_mutex);
	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvidiu = udf_sb_lvidiu(sbi);

	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
	udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
				CURRENT_TIME);
	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);

	lvid->descTag.descCRC = cpu_to_le16(
		crc_itu_t(0, (char *)lvid + sizeof(struct tag),
			le16_to_cpu(lvid->descTag.descCRCLength)));

	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
	mark_buffer_dirty(bh);
	sbi->s_lvid_dirty = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
}
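
/*
 * Mark the Logical Volume Integrity Descriptor as "closed" (volume cleanly
 * unmounted), update the recorded UDF revision limits, recompute the
 * descriptor CRC and checksum, and schedule the buffer for writeout.
 */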
static void udf_close_lvid(struct super_block *sb)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = sbi->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolIntegrityDescImpUse *lvidiu;

	if (!bh)
		return;

	mutex_lock(&sbi->s_alloc_mutex);
	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvidiu = udf_sb_lvidiu(sbi);
	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);

	lvid->descTag.descCRC = cpu_to_le16(
			crc_itu_t(0, (char *)lvid + sizeof(struct tag),
				le16_to_cpu(lvid->descTag.descCRCLength)));

	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
	/*
	 * We set buffer uptodate unconditionally here to avoid spurious
	 * warnings from mark_buffer_dirty() when previous EIO has marked
	 * the buffer as !uptodate
	 */
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	sbi->s_lvid_dirty = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
}
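
/*
 * Hand out the next unique ID from the logical volume header stored in the
 * LVID and write the incremented counter back. When the low 32 bits of the
 * counter would wrap to zero, it is advanced by 16 so the low word skips the
 * values 0-15.
 */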
u64 lvid_get_unique_id(struct super_block *sb)
{
	struct buffer_head *bh;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolHeaderDesc *lvhd;
	u64 uniqueID;
	u64 ret;

	bh = sbi->s_lvid_bh;
	if (!bh)
		return 0;

	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;

	mutex_lock(&sbi->s_alloc_mutex);
	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
	if (!(++uniqueID & 0xFFFFFFFF))
		uniqueID += 16;
	lvhd->uniqueID = cpu_to_le64(uniqueID);
	mutex_unlock(&sbi->s_alloc_mutex);
	mark_buffer_dirty(bh);

	return ret;
}
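
/*
 * Release the buffer heads referenced by a space bitmap and free the bitmap
 * structure itself, using kfree() or vfree() depending on its size.
 */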
static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
{
	int i;
	int nr_groups = bitmap->s_nr_groups;
	int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
						nr_groups);

	for (i = 0; i < nr_groups; i++)
		if (bitmap->s_block_bitmap[i])
			brelse(bitmap->s_block_bitmap[i]);

	if (size <= PAGE_SIZE)
		kfree(bitmap);
	else
		vfree(bitmap);
}
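
/*
 * Drop all resources attached to one partition map: free-space tables and
 * bitmaps, the sparing table buffers of sparable partitions, and the
 * metadata file inodes of metadata partitions.
 */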
static void udf_free_partition(struct udf_part_map *map)
{
	int i;
	struct udf_meta_data *mdata;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		iput(map->s_uspace.s_table);
	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		iput(map->s_fspace.s_table);
	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		udf_sb_free_bitmap(map->s_uspace.s_bitmap);
	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		udf_sb_free_bitmap(map->s_fspace.s_bitmap);
	if (map->s_partition_type == UDF_SPARABLE_MAP15)
		for (i = 0; i < 4; i++)
			brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
	else if (map->s_partition_type == UDF_METADATA_MAP25) {
		mdata = &map->s_type_specific.s_metadata;
		iput(mdata->s_metadata_fe);
		mdata->s_metadata_fe = NULL;

		iput(mdata->s_mirror_fe);
		mdata->s_mirror_fe = NULL;

		iput(mdata->s_bitmap_fe);
		mdata->s_bitmap_fe = NULL;
	}
}
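
/*
 * Fill the superblock for a UDF mount: parse the mount options, locate the
 * Volume Recognition Sequence and anchor, load the Volume Descriptor
 * Sequence, check the UDF revision limits and set up the root inode and
 * dentry. Returns 0 on success, -ENOMEM if the in-core info cannot be
 * allocated, or -EINVAL on any other failure.
 */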
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
	int i;
	int ret;
	struct inode *inode = NULL;
	struct udf_options uopt;
	struct kernel_lb_addr rootdir, fileset;
	struct udf_sb_info *sbi;

	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
	uopt.uid = -1;
	uopt.gid = -1;
	uopt.umask = 0;
	uopt.fmode = UDF_INVALID_MODE;
	uopt.dmode = UDF_INVALID_MODE;

	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;

	mutex_init(&sbi->s_alloc_mutex);

	if (!udf_parse_options((char *)options, &uopt, false))
		goto error_out;

	if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
	    uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
		udf_err(sb, "utf8 cannot be combined with iocharset\n");
		goto error_out;
	}
#ifdef CONFIG_UDF_NLS
	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
		uopt.nls_map = load_nls_default();
		if (!uopt.nls_map)
			uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
		else
			udf_debug("Using default NLS map\n");
	}
#endif
	if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
		uopt.flags |= (1 << UDF_FLAG_UTF8);

	fileset.logicalBlockNum = 0xFFFFFFFF;
	fileset.partitionReferenceNum = 0xFFFF;

	sbi->s_flags = uopt.flags;
	sbi->s_uid = uopt.uid;
	sbi->s_gid = uopt.gid;
	sbi->s_umask = uopt.umask;
	sbi->s_fmode = uopt.fmode;
	sbi->s_dmode = uopt.dmode;
	sbi->s_nls_map = uopt.nls_map;
	rwlock_init(&sbi->s_cred_lock);

	if (uopt.session == 0xFFFFFFFF)
		sbi->s_session = udf_get_last_session(sb);
	else
		sbi->s_session = uopt.session;

	udf_debug("Multi-session=%d\n", sbi->s_session);

	/* Fill in the rest of the superblock */
	sb->s_op = &udf_sb_ops;
	sb->s_export_op = &udf_export_ops;
	sb->s_dirt = 0;
	sb->s_magic = UDF_SUPER_MAGIC;
	sb->s_time_gran = 1000;

	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
	} else {
		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
		if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
			if (!silent)
				pr_notice("Rescanning with blocksize %d\n",
					  UDF_DEFAULT_BLOCKSIZE);
			uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
		}
	}
	if (!ret) {
		udf_warn(sb, "No partition found (1)\n");
		goto error_out;
	}

	udf_debug("Lastblock=%d\n", sbi->s_last_block);

	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDescImpUse *lvidiu =
							udf_sb_lvidiu(sbi);
		uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
		uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
		/* uint16_t maxUDFWriteRev =
				le16_to_cpu(lvidiu->maxUDFWriteRev); */

		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
				le16_to_cpu(lvidiu->minUDFReadRev),
				UDF_MAX_READ_VERSION);
			goto error_out;
		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
			sb->s_flags |= MS_RDONLY;

		sbi->s_udfrev = minUDFWriteRev;

		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
	}

	if (!sbi->s_partitions) {
		udf_warn(sb, "No partition found (2)\n");
		goto error_out;
	}

	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
			UDF_PART_FLAG_READ_ONLY) {
		pr_notice("Partition marked readonly; forcing readonly mount\n");
		sb->s_flags |= MS_RDONLY;
	}

	if (udf_find_fileset(sb, &fileset, &rootdir)) {
		udf_warn(sb, "No fileset found\n");
		goto error_out;
	}

	if (!silent) {
		struct timestamp ts;
		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
		udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
			 sbi->s_volume_ident,
			 le16_to_cpu(ts.year), ts.month, ts.day,
			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
	}
	if (!(sb->s_flags & MS_RDONLY))
		udf_open_lvid(sb);

	/* Assign the root inode */
	/* assign inodes by physical block number */
	/* perhaps it's not extensible enough, but for now ... */
	inode = udf_iget(sb, &rootdir);
	if (!inode) {
		udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
			rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
		goto error_out;
	}

	/* Allocate a dentry for the root inode */
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		udf_err(sb, "Couldn't allocate root dentry\n");
		iput(inode);
		goto error_out;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	return 0;

error_out:
	if (sbi->s_vat_inode)
		iput(sbi->s_vat_inode);
	if (sbi->s_partitions)
		for (i = 0; i < sbi->s_partitions; i++)
			udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
		unload_nls(sbi->s_nls_map);
#endif
	if (!(sb->s_flags & MS_RDONLY))
		udf_close_lvid(sb);
	brelse(sbi->s_lvid_bh);

	kfree(sbi->s_partmaps);
	kfree(sbi);
	sb->s_fs_info = NULL;

	return -EINVAL;
}
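
/*
 * Message helpers behind udf_err() and udf_warn(): print the device name and
 * calling function together with the formatted message. _udf_err() also
 * marks a writable superblock dirty.
 */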
void _udf_err(struct super_block *sb, const char *function,
	      const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	/* mark sb error */
	if (!(sb->s_flags & MS_RDONLY))
		sb->s_dirt = 1;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);

	va_end(args);
}

void _udf_warn(struct super_block *sb, const char *function,
	       const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);

	va_end(args);
}
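
/*
 * Tear down the in-core state at unmount time: release the VAT inode, the
 * partition maps and the NLS table, close the LVID on read-write mounts,
 * and free the superblock private info.
 */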
static void udf_put_super(struct super_block *sb)
{
	int i;
	struct udf_sb_info *sbi;

	sbi = UDF_SB(sb);

	if (sbi->s_vat_inode)
		iput(sbi->s_vat_inode);
	if (sbi->s_partitions)
		for (i = 0; i < sbi->s_partitions; i++)
			udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
		unload_nls(sbi->s_nls_map);
#endif
	if (!(sb->s_flags & MS_RDONLY))
		udf_close_lvid(sb);
	brelse(sbi->s_lvid_bh);
	kfree(sbi->s_partmaps);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
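
/*
 * Write back a dirty Logical Volume Integrity Descriptor. Only the buffer is
 * marked dirty here; the block device sync that follows does the actual I/O.
 */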
static int udf_sync_fs(struct super_block *sb, int wait)
{
	struct udf_sb_info *sbi = UDF_SB(sb);

	mutex_lock(&sbi->s_alloc_mutex);
	if (sbi->s_lvid_dirty) {
		/*
		 * Blockdevice will be synced later so we don't have to submit
		 * the buffer for IO
		 */
		mark_buffer_dirty(sbi->s_lvid_bh);
		sb->s_dirt = 0;
		sbi->s_lvid_dirty = 0;
	}
	mutex_unlock(&sbi->s_alloc_mutex);

	return 0;
}
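
/*
 * Fill in statfs(2) data for the mounted volume. File and directory counts
 * come from the LVID implementation-use area when one is available; the
 * free-space figures come from udf_count_free().
 */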
static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDescImpUse *lvidiu;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	if (sbi->s_lvid_bh != NULL)
		lvidiu = udf_sb_lvidiu(sbi);
	else
		lvidiu = NULL;

	buf->f_type = UDF_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
	buf->f_bfree = udf_count_free(sb);
	buf->f_bavail = buf->f_bfree;
	buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
					  le32_to_cpu(lvidiu->numDirs)) : 0)
			+ buf->f_bfree;
	buf->f_ffree = buf->f_bfree;
	buf->f_namelen = UDF_NAME_LEN - 2;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
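
/*
 * Count free blocks recorded in a Space Bitmap Descriptor by walking all of
 * its blocks and summing the set bits.
 */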
static unsigned int udf_count_free_bitmap(struct super_block *sb,
					  struct udf_bitmap *bitmap)
{
	struct buffer_head *bh = NULL;
	unsigned int accum = 0;
	int index;
	int block = 0, newblock;
	struct kernel_lb_addr loc;
	uint32_t bytes;
	uint8_t *ptr;
	uint16_t ident;
	struct spaceBitmapDesc *bm;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
	bh = udf_read_ptagged(sb, &loc, 0, &ident);

	if (!bh) {
		udf_err(sb, "udf_count_free failed\n");
		goto out;
	} else if (ident != TAG_IDENT_SBD) {
		brelse(bh);
		udf_err(sb, "udf_count_free failed\n");
		goto out;
	}

	bm = (struct spaceBitmapDesc *)bh->b_data;
	bytes = le32_to_cpu(bm->numOfBytes);
	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
	ptr = (uint8_t *)bh->b_data;

	while (bytes > 0) {
		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);

		accum += bitmap_weight((const unsigned long *)(ptr + index),
					cur_bytes * 8);
		bytes -= cur_bytes;
		if (bytes) {
			brelse(bh);
			newblock = udf_get_lb_pblock(sb, &loc, ++block);
			bh = udf_tread(sb, newblock);
			if (!bh) {
				udf_debug("read failed\n");
				goto out;
			}
			index = 0;
			ptr = (uint8_t *)bh->b_data;
		}
	}
	brelse(bh);
out:
	return accum;
}
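
/*
 * Count free blocks recorded in an unallocated space table by iterating over
 * its allocation extents and converting each extent length to blocks.
 */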
static unsigned int udf_count_free_table(struct super_block *sb,
					 struct inode *table)
{
	unsigned int accum = 0;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	int8_t etype;
	struct extent_position epos;

	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
	epos.block = UDF_I(table)->i_location;
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.bh = NULL;

	while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
		accum += (elen >> table->i_sb->s_blocksize_bits);

	brelse(epos.bh);
	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);

	return accum;
}
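
/*
 * Return the number of free blocks on the current partition. The cached
 * free-space figure from the LVID is preferred; if it is missing or marked
 * unreliable (0xFFFFFFFF), fall back to counting the space bitmaps and then
 * the space tables.
 */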
static unsigned int udf_count_free(struct super_block *sb)
{
	unsigned int accum = 0;
	struct udf_sb_info *sbi;
	struct udf_part_map *map;

	sbi = UDF_SB(sb);
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDesc *lvid =
			(struct logicalVolIntegrityDesc *)
			sbi->s_lvid_bh->b_data;
		if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
			accum = le32_to_cpu(
					lvid->freeSpaceTable[sbi->s_partition]);
			if (accum == 0xFFFFFFFF)
				accum = 0;
		}
	}

	if (accum)
		return accum;

	map = &sbi->s_partmaps[sbi->s_partition];
	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		accum += udf_count_free_bitmap(sb,
					       map->s_uspace.s_bitmap);
	}
	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		accum += udf_count_free_bitmap(sb,
					       map->s_fspace.s_bitmap);
	}
	if (accum)
		return accum;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		accum += udf_count_free_table(sb,
					      map->s_uspace.s_table);
	}
	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		accum += udf_count_free_table(sb,
					      map->s_fspace.s_table);
	}

	return accum;
}