Skip to content

Commit 47b2ca1

Browse files
KAGA-KOKO authored and hemantbeast committed
softirq: Use hotplug thread infrastructure
[ paulmck: Call rcu_note_context_switch() with interrupts enabled. ] Signed-off-by: Thomas Gleixner <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Srivatsa S. Bhat <[email protected]> Cc: Rusty Russell <[email protected]> Reviewed-by: Paul E. McKenney <[email protected]> Cc: Namhyung Kim <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Thomas Gleixner <[email protected]> Signed-off-by: franciscofranco <[email protected]> Signed-off-by: tarun93 <[email protected]>
1 parent 88bd663 commit 47b2ca1

File tree

1 file changed

+27
-84
lines changed

1 file changed

+27
-84
lines changed

kernel/softirq.c

Lines changed: 27 additions & 84 deletions
Original file line number | Diff line number | Diff line change
@@ -23,6 +23,7 @@
2323
#include <linux/rcupdate.h>
2424
#include <linux/ftrace.h>
2525
#include <linux/smp.h>
26+
#include <linux/smpboot.h>
2627
#include <linux/tick.h>
2728

2829
#define CREATE_TRACE_POINTS
@@ -733,49 +734,22 @@ void __init softirq_init(void)
733734
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
734735
}
735736

736-
static int run_ksoftirqd(void * __bind_cpu)
737+
static int ksoftirqd_should_run(unsigned int cpu)
737738
{
738-
set_current_state(TASK_INTERRUPTIBLE);
739-
740-
while (!kthread_should_stop()) {
741-
preempt_disable();
742-
if (!local_softirq_pending()) {
743-
schedule_preempt_disabled();
744-
}
745-
746-
__set_current_state(TASK_RUNNING);
747-
748-
while (local_softirq_pending()) {
749-
/* Preempt disable stops cpu going offline.
750-
If already offline, we'll be on wrong CPU:
751-
don't process */
752-
if (cpu_is_offline((long)__bind_cpu))
753-
goto wait_to_die;
754-
local_irq_disable();
755-
if (local_softirq_pending())
756-
__do_softirq();
757-
local_irq_enable();
758-
sched_preempt_enable_no_resched();
759-
cond_resched();
760-
preempt_disable();
761-
rcu_note_context_switch((long)__bind_cpu);
762-
}
763-
preempt_enable();
764-
set_current_state(TASK_INTERRUPTIBLE);
765-
}
766-
__set_current_state(TASK_RUNNING);
767-
return 0;
739+
return local_softirq_pending();
740+
}
768741

769-
wait_to_die:
770-
preempt_enable();
771-
/* Wait for kthread_stop */
772-
set_current_state(TASK_INTERRUPTIBLE);
773-
while (!kthread_should_stop()) {
774-
schedule();
775-
set_current_state(TASK_INTERRUPTIBLE);
742+
static void run_ksoftirqd(unsigned int cpu)
743+
{
744+
local_irq_disable();
745+
if (local_softirq_pending()) {
746+
__do_softirq();
747+
rcu_note_context_switch(cpu);
748+
local_irq_enable();
749+
cond_resched();
750+
return;
776751
}
777-
__set_current_state(TASK_RUNNING);
778-
return 0;
752+
local_irq_enable();
779753
}
780754

781755
#ifdef CONFIG_HOTPLUG_CPU
@@ -841,65 +815,34 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
841815
unsigned long action,
842816
void *hcpu)
843817
{
844-
int hotcpu = (unsigned long)hcpu;
845-
struct task_struct *p;
846-
847818
switch (action) {
848-
case CPU_UP_PREPARE:
849-
case CPU_UP_PREPARE_FROZEN:
850-
p = kthread_create_on_node(run_ksoftirqd,
851-
hcpu,
852-
cpu_to_node(hotcpu),
853-
"ksoftirqd/%d", hotcpu);
854-
if (IS_ERR(p)) {
855-
printk("ksoftirqd for %i failed\n", hotcpu);
856-
return notifier_from_errno(PTR_ERR(p));
857-
}
858-
kthread_bind(p, hotcpu);
859-
per_cpu(ksoftirqd, hotcpu) = p;
860-
break;
861-
case CPU_ONLINE:
862-
case CPU_ONLINE_FROZEN:
863-
wake_up_process(per_cpu(ksoftirqd, hotcpu));
864-
break;
865819
#ifdef CONFIG_HOTPLUG_CPU
866-
case CPU_UP_CANCELED:
867-
case CPU_UP_CANCELED_FROZEN:
868-
if (!per_cpu(ksoftirqd, hotcpu))
869-
break;
870-
/* Unbind so it can run. Fall thru. */
871-
kthread_bind(per_cpu(ksoftirqd, hotcpu),
872-
cpumask_any(cpu_online_mask));
873820
case CPU_DEAD:
874-
case CPU_DEAD_FROZEN: {
875-
static const struct sched_param param = {
876-
.sched_priority = MAX_RT_PRIO-1
877-
};
878-
879-
p = per_cpu(ksoftirqd, hotcpu);
880-
per_cpu(ksoftirqd, hotcpu) = NULL;
881-
sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
882-
kthread_stop(p);
883-
takeover_tasklets(hotcpu);
821+
case CPU_DEAD_FROZEN:
822+
takeover_tasklets((unsigned long)hcpu);
884823
break;
885-
}
886824
#endif /* CONFIG_HOTPLUG_CPU */
887-
}
825+
}
888826
return NOTIFY_OK;
889827
}
890828

891829
static struct notifier_block __cpuinitdata cpu_nfb = {
892830
.notifier_call = cpu_callback
893831
};
894832

833+
static struct smp_hotplug_thread softirq_threads = {
834+
.store = &ksoftirqd,
835+
.thread_should_run = ksoftirqd_should_run,
836+
.thread_fn = run_ksoftirqd,
837+
.thread_comm = "ksoftirqd/%u",
838+
};
839+
895840
static __init int spawn_ksoftirqd(void)
896841
{
897-
void *cpu = (void *)(long)smp_processor_id();
898-
int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
899-
900-
BUG_ON(err != NOTIFY_OK);
901-
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
902842
register_cpu_notifier(&cpu_nfb);
843+
844+
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
845+
903846
return 0;
904847
}
905848
early_initcall(spawn_ksoftirqd);

0 commit comments

Comments (0)