Skip to content

Commit 2f35c41

Browse files
mhiramat-hitachi authored and rustyrussell committed
module: Replace module_ref with atomic_t refcnt
Replace the module_ref per-cpu complex reference counter with a simple atomic_t refcnt. This is for code simplification.

Signed-off-by: Masami Hiramatsu <[email protected]>
Signed-off-by: Rusty Russell <[email protected]>
1 parent 0286b5e commit 2f35c41

File tree

3 files changed

+7
-50
lines changed

3 files changed

+7
-50
lines changed

include/linux/module.h

Lines changed: 1 addition & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -210,20 +210,6 @@ enum module_state {
210210
MODULE_STATE_UNFORMED, /* Still setting it up. */
211211
};
212212

213-
/**
214-
* struct module_ref - per cpu module reference counts
215-
* @incs: number of module get on this cpu
216-
* @decs: number of module put on this cpu
217-
*
218-
* We force an alignment on 8 or 16 bytes, so that alloc_percpu()
219-
* put @incs/@decs in same cache line, with no extra memory cost,
220-
* since alloc_percpu() is fine grained.
221-
*/
222-
struct module_ref {
223-
unsigned long incs;
224-
unsigned long decs;
225-
} __attribute((aligned(2 * sizeof(unsigned long))));
226-
227213
struct module {
228214
enum module_state state;
229215

@@ -367,7 +353,7 @@ struct module {
367353
/* Destruction function. */
368354
void (*exit)(void);
369355

370-
struct module_ref __percpu *refptr;
356+
atomic_t refcnt;
371357
#endif
372358

373359
#ifdef CONFIG_CONSTRUCTORS

include/trace/events/module.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
8080

8181
TP_fast_assign(
8282
__entry->ip = ip;
83-
__entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
83+
__entry->refcnt = atomic_read(&mod->refcnt);
8484
__assign_str(name, mod->name);
8585
),
8686

kernel/module.c

Lines changed: 5 additions & 34 deletions
Original file line number | Diff line number | Diff line change
@@ -631,15 +631,11 @@ EXPORT_TRACEPOINT_SYMBOL(module_get);
631631
/* Init the unload section of the module. */
632632
static int module_unload_init(struct module *mod)
633633
{
634-
mod->refptr = alloc_percpu(struct module_ref);
635-
if (!mod->refptr)
636-
return -ENOMEM;
637-
638634
INIT_LIST_HEAD(&mod->source_list);
639635
INIT_LIST_HEAD(&mod->target_list);
640636

641637
/* Hold reference count during initialization. */
642-
raw_cpu_write(mod->refptr->incs, 1);
638+
atomic_set(&mod->refcnt, 1);
643639

644640
return 0;
645641
}
@@ -721,8 +717,6 @@ static void module_unload_free(struct module *mod)
721717
kfree(use);
722718
}
723719
mutex_unlock(&module_mutex);
724-
725-
free_percpu(mod->refptr);
726720
}
727721

728722
#ifdef CONFIG_MODULE_FORCE_UNLOAD
@@ -772,28 +766,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
772766

773767
unsigned long module_refcount(struct module *mod)
774768
{
775-
unsigned long incs = 0, decs = 0;
776-
int cpu;
777-
778-
for_each_possible_cpu(cpu)
779-
decs += per_cpu_ptr(mod->refptr, cpu)->decs;
780-
/*
781-
* ensure the incs are added up after the decs.
782-
* module_put ensures incs are visible before decs with smp_wmb.
783-
*
784-
* This 2-count scheme avoids the situation where the refcount
785-
* for CPU0 is read, then CPU0 increments the module refcount,
786-
* then CPU1 drops that refcount, then the refcount for CPU1 is
787-
* read. We would record a decrement but not its corresponding
788-
* increment so we would see a low count (disaster).
789-
*
790-
* Rare situation? But module_refcount can be preempted, and we
791-
* might be tallying up 4096+ CPUs. So it is not impossible.
792-
*/
793-
smp_rmb();
794-
for_each_possible_cpu(cpu)
795-
incs += per_cpu_ptr(mod->refptr, cpu)->incs;
796-
return incs - decs;
769+
return (unsigned long)atomic_read(&mod->refcnt);
797770
}
798771
EXPORT_SYMBOL(module_refcount);
799772

@@ -935,7 +908,7 @@ void __module_get(struct module *module)
935908
{
936909
if (module) {
937910
preempt_disable();
938-
__this_cpu_inc(module->refptr->incs);
911+
atomic_inc(&module->refcnt);
939912
trace_module_get(module, _RET_IP_);
940913
preempt_enable();
941914
}
@@ -950,7 +923,7 @@ bool try_module_get(struct module *module)
950923
preempt_disable();
951924

952925
if (likely(module_is_live(module))) {
953-
__this_cpu_inc(module->refptr->incs);
926+
atomic_inc(&module->refcnt);
954927
trace_module_get(module, _RET_IP_);
955928
} else
956929
ret = false;
@@ -965,9 +938,7 @@ void module_put(struct module *module)
965938
{
966939
if (module) {
967940
preempt_disable();
968-
smp_wmb(); /* see comment in module_refcount */
969-
__this_cpu_inc(module->refptr->decs);
970-
941+
atomic_dec(&module->refcnt);
971942
trace_module_put(module, _RET_IP_);
972943
preempt_enable();
973944
}

0 commit comments

Comments (0)