@@ -135,11 +135,11 @@ struct cpu_cache_info {
 	struct kobject kobj;
 };

-static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS] __cpuinitdata;
+static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS];
 #define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])

 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	pal_cache_shared_info_t	csi;
@@ -174,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
 			&csi) == PAL_STATUS_SUCCESS);
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -298,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = {
 	.sysfs_ops	= &cache_sysfs_ops,
 };

-static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+static void cpu_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(all_cpu_cache_info[cpu].cache_leaves);
 	all_cpu_cache_info[cpu].cache_leaves = NULL;
@@ -307,7 +307,7 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
 	return;
 }

-static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+static int cpu_cache_sysfs_init(unsigned int cpu)
 {
 	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
@@ -351,7 +351,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
 }

 /* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device * sys_dev)
+static int cache_add_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -401,7 +401,7 @@ static int __cpuinit cache_add_dev(struct device * sys_dev)
 }

 /* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct device * sys_dev)
+static int cache_remove_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -425,7 +425,7 @@ static int __cpuinit cache_remove_dev(struct device * sys_dev)
  * When a cpu is hot-plugged, do a check and initiate
  * cache kobject if necessary
  */
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+static int cache_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }

-static struct notifier_block __cpuinitdata cache_cpu_notifier =
+static struct notifier_block cache_cpu_notifier =
 {
 	.notifier_call = cache_cpu_callback
 };