@@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
-	unsigned int i;
+	int cpu;
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for (i = 0; i < NR_CPUS; i++)
-		local_set(&mod->ref[i].count, 0);
+	for_each_possible_cpu(cpu)
+		local_set(__module_ref_addr(mod, cpu), 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
+	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int i, total = 0;
+	unsigned int total = 0;
+	int cpu;
 
-	for (i = 0; i < NR_CPUS; i++)
-		total += local_read(&mod->ref[i].count);
+	for_each_possible_cpu(cpu)
+		total += local_read(__module_ref_addr(mod, cpu));
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);
@@ -894,7 +895,7 @@ void module_put(struct module *module)
 {
 	if (module) {
 		unsigned int cpu = get_cpu();
-		local_dec(&module->ref[cpu].count);
+		local_dec(__module_ref_addr(module, cpu));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
@@ -1464,7 +1465,10 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	if (mod->percpu)
 		percpu_modfree(mod->percpu);
-
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	if (mod->refptr)
+		percpu_modfree(mod->refptr);
+#endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
@@ -2011,6 +2015,14 @@ static noinline struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto free_mod;
 
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
+				      mod->name);
+	if (!mod->refptr) {
+		err = -ENOMEM;
+		goto free_mod;
+	}
+#endif
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2018,7 +2030,7 @@ static noinline struct module *load_module(void __user *umod,
 					 mod->name);
 		if (!percpu) {
 			err = -ENOMEM;
-			goto free_mod;
+			goto free_percpu;
 		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 		mod->percpu = percpu;
@@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod,
 free_percpu:
 	if (percpu)
 		percpu_modfree(percpu);
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod->refptr);
+#endif
 free_mod:
 	kfree(args);
 free_hdr:
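
Note on the helpers used above: __module_ref_addr() and the new mod->refptr member come from the companion include/linux/module.h change, which is not part of this kernel/module.c diff. Purely as a reading aid, here is a minimal sketch of how that helper is presumably shaped; the char-pointer refptr layout and the use of per_cpu_offset() are assumptions, not quoted from the header hunk. On SMP each CPU's local_t lives in the percpu_modalloc()'ed block that refptr points at, while on UP a single counter embedded in struct module suffices.

/* Sketch only, assuming the companion include/linux/module.h change;
 * not part of this diff.  Resolve the local_t that counts references
 * to @mod on @cpu. */
static inline local_t *__module_ref_addr(struct module *mod, int cpu)
{
#ifdef CONFIG_SMP
	/* Assumed layout: refptr is a per-cpu base, offset per CPU. */
	return (local_t *) (mod->refptr + per_cpu_offset(cpu));
#else
	/* UP: a single embedded counter is enough. */
	return &mod->ref;
#endif
}

Under that assumption, the counters are only backed by per-cpu storage for CPUs that can actually exist, which is also why the loops above switch from walking 0..NR_CPUS to for_each_possible_cpu().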