@@ -15,6 +15,139 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
+/*
+ * The initial clump size calculation code was taken
+ * from http://opensource.apple.com/tarballs/diskdev_cmds/
+ */
+#define CLUMP_ENTRIES	15
+
+static short clumptbl[CLUMP_ENTRIES * 3] = {
+/*
+ *          Volume     Attributes     Catalog      Extents
+ *           Size      Clump (MB)    Clump (MB)   Clump (MB)
+ */
+	/*   1GB */      4,             4,           4,
+	/*   2GB */      6,             6,           4,
+	/*   4GB */      8,             8,           4,
+	/*   8GB */     11,            11,           5,
+	/*
+	 * For volumes 16GB and larger, we want to make sure that a full OS
+	 * install won't require fragmentation of the Catalog or Attributes
+	 * B-trees.  We do this by making the clump sizes sufficiently large,
+	 * and by leaving a gap after the B-trees for them to grow into.
+	 *
+	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
+	 * results in:
+	 * Catalog B-tree Header
+	 *	nodeSize:          8192
+	 *	totalNodes:       31616
+	 *	freeNodes:         1978
+	 * (used = 231.55 MB)
+	 * Attributes B-tree Header
+	 *	nodeSize:          8192
+	 *	totalNodes:       63232
+	 *	freeNodes:          958
+	 * (used = 486.52 MB)
+	 *
+	 * We also want Time Machine backup volumes to have a sufficiently
+	 * large clump size to reduce fragmentation.
+	 *
+	 * The series of numbers for Catalog and Attributes form a geometric
+	 * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
+	 * the previous term.  For Attributes (16GB to 512GB), each term is
+	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
+	 * 2**(1/5) times the previous term.
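+	 *
+	 * For example, the Catalog column from 16GB to 512GB is
+	 * 32 * 8**(n/5) for n = 0..5, rounded to the nearest
+	 * integer: 32, 49, 74, 111, 169, 256.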
+	 */
+	/*  16GB */     64,            32,           5,
+	/*  32GB */     84,            49,           6,
+	/*  64GB */    111,            74,           7,
+	/* 128GB */    147,           111,           8,
+	/* 256GB */    194,           169,           9,
+	/* 512GB */    256,           256,          11,
+	/*   1TB */    294,           294,          14,
+	/*   2TB */    338,           338,          16,
+	/*   4TB */    388,           388,          20,
+	/*   8TB */    446,           446,          25,
+	/*  16TB */    512,           512,          32
+};
+
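+/*
+ * hfsplus_calc_btree_clump_size - pick the clump size for a B-tree file.
+ * @block_size: volume allocation block size, in bytes
+ * @node_size: B-tree node size, in bytes
+ * @sectors: volume size, in 512-byte sectors
+ * @file_id: CNID of the B-tree file (attributes, catalog, or extents)
+ */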
+u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
+				  u64 sectors, int file_id)
+{
+	u32 mod = max(node_size, block_size);
+	u32 clump_size;
+	int column;
+	int i;
+
+	/* Figure out which column of the above table to use for this file. */
+	switch (file_id) {
+	case HFSPLUS_ATTR_CNID:
+		column = 0;
+		break;
+	case HFSPLUS_CAT_CNID:
+		column = 1;
+		break;
+	default:
+		column = 2;
+		break;
+	}
+
+	/*
+	 * The default clump size is 0.8% of the volume size. It
+	 * must also be a multiple of the node and block size.
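+	 * (One sector is 512 bytes, so sectors << 2 bytes comes to
+	 * 4/512 = 1/128 of the volume size, i.e. about 0.8%.)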
+	 */
+	if (sectors < 0x200000) {
+		clump_size = sectors << 2;	/*  0.8 %  */
+		if (clump_size < (8 * node_size))
+			clump_size = 8 * node_size;
+	} else {
+		/* turn exponent into table index... */
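+		/*
+		 * After the shift below, sectors counts 2GB units; the
+		 * number of halvings needed to reach zero selects the
+		 * table row: i == 0 for 1GB, i == 1 for 2GB, and so on,
+		 * capped at the 16TB entry.
+		 */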
+		for (i = 0, sectors = sectors >> 22;
+		     sectors && (i < CLUMP_ENTRIES - 1);
+		     ++i, sectors = sectors >> 1) {
+			/* empty body */
+		}
+
+		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
+	}
+
+	/*
+	 * Round the clump size to a multiple of node and block size.
+	 * NOTE: This rounds down.
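+	 * (For example, a clump_size of 4000000 with 8KB nodes and
+	 * smaller blocks rounds down to 488 * 8192 = 3997696.)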
+	 */
+	clump_size /= mod;
+	clump_size *= mod;
+
+	/*
+	 * Rounding down could have rounded down to 0 if the block size was
+	 * greater than the clump size.  If so, just use one block or node.
+	 */
+	if (clump_size == 0)
+		clump_size = mod;
+
+	return clump_size;
+}
 
 /* Get a reference to a B*Tree and do some initial checks */
 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)