--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -23,6 +23,7 @@
 #include <linux/rcupdate.h>
 #include <linux/ftrace.h>
 #include <linux/smp.h>
+#include <linux/smpboot.h>
 #include <linux/tick.h>
 
 #define CREATE_TRACE_POINTS
@@ -733,49 +734,22 @@ void __init softirq_init(void)
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int run_ksoftirqd(void * __bind_cpu)
+static int ksoftirqd_should_run(unsigned int cpu)
 {
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	while (!kthread_should_stop()) {
-		preempt_disable();
-		if (!local_softirq_pending()) {
-			schedule_preempt_disabled();
-		}
-
-		__set_current_state(TASK_RUNNING);
-
-		while (local_softirq_pending()) {
-			/* Preempt disable stops cpu going offline.
-			   If already offline, we'll be on wrong CPU:
-			   don't process */
-			if (cpu_is_offline((long)__bind_cpu))
-				goto wait_to_die;
-			local_irq_disable();
-			if (local_softirq_pending())
-				__do_softirq();
-			local_irq_enable();
-			sched_preempt_enable_no_resched();
-			cond_resched();
-			preempt_disable();
-			rcu_note_context_switch((long)__bind_cpu);
-		}
-		preempt_enable();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	return local_softirq_pending();
+}
 
-wait_to_die:
-	preempt_enable();
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
+static void run_ksoftirqd(unsigned int cpu)
+{
+	local_irq_disable();
+	if (local_softirq_pending()) {
+		__do_softirq();
+		rcu_note_context_switch(cpu);
+		local_irq_enable();
+		cond_resched();
+		return;
 	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	local_irq_enable();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -841,65 +815,34 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
-	int hotcpu = (unsigned long)hcpu;
-	struct task_struct *p;
-
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		p = kthread_create_on_node(run_ksoftirqd,
-					   hcpu,
-					   cpu_to_node(hotcpu),
-					   "ksoftirqd/%d", hotcpu);
-		if (IS_ERR(p)) {
-			printk("ksoftirqd for %i failed\n", hotcpu);
-			return notifier_from_errno(PTR_ERR(p));
-		}
-		kthread_bind(p, hotcpu);
-		per_cpu(ksoftirqd, hotcpu) = p;
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		wake_up_process(per_cpu(ksoftirqd, hotcpu));
-		break;
 #ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		if (!per_cpu(ksoftirqd, hotcpu))
-			break;
-		/* Unbind so it can run.  Fall thru. */
-		kthread_bind(per_cpu(ksoftirqd, hotcpu),
-			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN: {
-		static const struct sched_param param = {
-			.sched_priority = MAX_RT_PRIO-1
-		};
-
-		p = per_cpu(ksoftirqd, hotcpu);
-		per_cpu(ksoftirqd, hotcpu) = NULL;
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-		kthread_stop(p);
-		takeover_tasklets(hotcpu);
+	case CPU_DEAD_FROZEN:
+		takeover_tasklets((unsigned long)hcpu);
 		break;
-	}
 #endif /* CONFIG_HOTPLUG_CPU */
- 	}
+	}
 	return NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
+static struct smp_hotplug_thread softirq_threads = {
+	.store			= &ksoftirqd,
+	.thread_should_run	= ksoftirqd_should_run,
+	.thread_fn		= run_ksoftirqd,
+	.thread_comm		= "ksoftirqd/%u",
+};
+
 static __init int spawn_ksoftirqd(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
-	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-
-	BUG_ON(err != NOTIFY_OK);
-	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
+
+	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+
 	return 0;
 }
 early_initcall(spawn_ksoftirqd);
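Note: this patch converts ksoftirqd from an open-coded kthread with hand-rolled hotplug handling to the generic smpboot per-CPU thread infrastructure. The thread_should_run()/thread_fn() split replaces the old self-managed sleep/run loop, and thread creation, CPU binding, and teardown across hotplug move into the smpboot core, which is why cpu_callback() shrinks to just takeover_tasklets() for dead CPUs. As a rough mental model (a simplified sketch, not the actual kernel/smpboot.c implementation; parking, stopping, and the optional setup/cleanup/park/unpark callbacks are elided, and the function name is illustrative), each registered per-CPU thread behaves roughly like this:

/*
 * Simplified model of the dispatch loop the smpboot core runs in each
 * per-CPU thread. The real loop, smpboot_thread_fn() in kernel/smpboot.c,
 * additionally handles kthread parking/stopping and the optional
 * setup/cleanup/park/unpark callbacks, all elided here;
 * smpboot_loop_sketch() itself is illustrative only.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static int smpboot_loop_sketch(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!ht->thread_should_run(cpu)) {
			/* No pending work: sleep until someone wakes us. */
			schedule();
			continue;
		}
		__set_current_state(TASK_RUNNING);
		/* Pending work: run the client callback once, then re-check. */
		ht->thread_fn(cpu);
	}
	return 0;
}

With this split, wakeup_softirqd() only has to wake the per-CPU thread: ksoftirqd_should_run() reports pending work via local_softirq_pending(), and run_ksoftirqd() drains it with interrupts disabled, re-enabling them and calling cond_resched() between passes so the CPU stays responsive.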