Skip to content

Commit e513cc1

Browse files
mhiramat (Hitachi) and rustyrussell
authored and committed
module: Remove stop_machine from module unloading
Remove stop_machine from module unloading by adding a new reference counting algorithm. This atomic refcounter works like a semaphore: it can be gotten (incremented) only while the counter is not 0. When loading a module, the kmodule subsystem sets the counter to MODULE_REF_BASE (= 1). And when unloading the module, it subtracts MODULE_REF_BASE from the counter. If no one refers to the module, the refcounter becomes 0 and we can remove the module safely. If someone refers to it, we try to recover the counter by adding MODULE_REF_BASE back, unless the counter has become 0 in the meantime, because the referrer may put the module right before the recovery. If the recovery fails, the refcount is 0 and can never be incremented again, so the module can be removed safely in that case too. Note that __module_get() forcibly gets the module refcounter; users should use try_module_get() instead. Signed-off-by: Masami Hiramatsu <[email protected]> Signed-off-by: Rusty Russell <[email protected]>
1 parent 2f35c41 commit e513cc1

File tree

1 file changed

+39
-28
lines changed

1 file changed

+39
-28
lines changed

kernel/module.c

Lines changed: 39 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,6 @@
4242
#include <linux/vermagic.h>
4343
#include <linux/notifier.h>
4444
#include <linux/sched.h>
45-
#include <linux/stop_machine.h>
4645
#include <linux/device.h>
4746
#include <linux/string.h>
4847
#include <linux/mutex.h>
@@ -98,7 +97,7 @@
9897
* 1) List of modules (also safely readable with preempt_disable),
9998
* 2) module_use links,
10099
* 3) module_addr_min/module_addr_max.
101-
* (delete uses stop_machine/add uses RCU list operations). */
100+
* (delete and add uses RCU list operations). */
102101
DEFINE_MUTEX(module_mutex);
103102
EXPORT_SYMBOL_GPL(module_mutex);
104103
static LIST_HEAD(modules);
@@ -628,14 +627,23 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
628627

629628
EXPORT_TRACEPOINT_SYMBOL(module_get);
630629

630+
/* MODULE_REF_BASE is the base reference count by kmodule loader. */
631+
#define MODULE_REF_BASE 1
632+
631633
/* Init the unload section of the module. */
632634
static int module_unload_init(struct module *mod)
633635
{
636+
/*
637+
* Initialize reference counter to MODULE_REF_BASE.
638+
* refcnt == 0 means module is going.
639+
*/
640+
atomic_set(&mod->refcnt, MODULE_REF_BASE);
641+
634642
INIT_LIST_HEAD(&mod->source_list);
635643
INIT_LIST_HEAD(&mod->target_list);
636644

637645
/* Hold reference count during initialization. */
638-
atomic_set(&mod->refcnt, 1);
646+
atomic_inc(&mod->refcnt);
639647

640648
return 0;
641649
}
@@ -734,39 +742,39 @@ static inline int try_force_unload(unsigned int flags)
734742
}
735743
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
736744

737-
struct stopref
745+
/* Try to release refcount of module, 0 means success. */
746+
static int try_release_module_ref(struct module *mod)
738747
{
739-
struct module *mod;
740-
int flags;
741-
int *forced;
742-
};
748+
int ret;
743749

744-
/* Whole machine is stopped with interrupts off when this runs. */
745-
static int __try_stop_module(void *_sref)
746-
{
747-
struct stopref *sref = _sref;
750+
/* Try to decrement refcnt which we set at loading */
751+
ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
752+
BUG_ON(ret < 0);
753+
if (ret)
754+
/* Someone can put this right now, recover with checking */
755+
ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
756+
757+
return ret;
758+
}
748759

760+
static int try_stop_module(struct module *mod, int flags, int *forced)
761+
{
749762
/* If it's not unused, quit unless we're forcing. */
750-
if (module_refcount(sref->mod) != 0) {
751-
if (!(*sref->forced = try_force_unload(sref->flags)))
763+
if (try_release_module_ref(mod) != 0) {
764+
*forced = try_force_unload(flags);
765+
if (!(*forced))
752766
return -EWOULDBLOCK;
753767
}
754768

755769
/* Mark it as dying. */
756-
sref->mod->state = MODULE_STATE_GOING;
757-
return 0;
758-
}
759-
760-
static int try_stop_module(struct module *mod, int flags, int *forced)
761-
{
762-
struct stopref sref = { mod, flags, forced };
770+
mod->state = MODULE_STATE_GOING;
763771

764-
return stop_machine(__try_stop_module, &sref, NULL);
772+
return 0;
765773
}
766774

767775
unsigned long module_refcount(struct module *mod)
768776
{
769-
return (unsigned long)atomic_read(&mod->refcnt);
777+
return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
770778
}
771779
EXPORT_SYMBOL(module_refcount);
772780

@@ -921,11 +929,11 @@ bool try_module_get(struct module *module)
921929

922930
if (module) {
923931
preempt_disable();
924-
925-
if (likely(module_is_live(module))) {
926-
atomic_inc(&module->refcnt);
932+
/* Note: here, we can fail to get a reference */
933+
if (likely(module_is_live(module) &&
934+
atomic_inc_not_zero(&module->refcnt) != 0))
927935
trace_module_get(module, _RET_IP_);
928-
} else
936+
else
929937
ret = false;
930938

931939
preempt_enable();
@@ -936,9 +944,12 @@ EXPORT_SYMBOL(try_module_get);
936944

937945
void module_put(struct module *module)
938946
{
947+
int ret;
948+
939949
if (module) {
940950
preempt_disable();
941-
atomic_dec(&module->refcnt);
951+
ret = atomic_dec_if_positive(&module->refcnt);
952+
WARN_ON(ret < 0); /* Failed to put refcount */
942953
trace_module_put(module, _RET_IP_);
943954
preempt_enable();
944955
}

0 commit comments

Comments (0)