@@ -1241,6 +1241,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+        /*
+         * If the thread is yielding, only wake it when this
+         * is invoked from idle
+         */
+        if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+                wake_up_process(t);
+}
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1312,17 +1322,6 @@ static int rcu_boost(struct rcu_node *rnp)
                ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
 
-/*
- * Timer handler to initiate waking up of boost kthreads that
- * have yielded the CPU due to excessive numbers of tasks to
- * boost.  We wake up the per-rcu_node kthread, which in turn
- * will wake up the booster kthread.
- */
-static void rcu_boost_kthread_timer(unsigned long arg)
-{
-        invoke_rcu_node_kthread((struct rcu_node *)arg);
-}
-
 /*
  * Priority-boosting kthread.  One per leaf rcu_node and one for the
  * root rcu_node.
@@ -1346,8 +1345,9 @@ static int rcu_boost_kthread(void *arg)
                else
                        spincnt = 0;
                if (spincnt > 10) {
+                       rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
                        trace_rcu_utilization("End boost kthread@rcu_yield");
-                       rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+                       schedule_timeout_interruptible(2);
                        trace_rcu_utilization("Start boost kthread@rcu_yield");
                        spincnt = 0;
                }
@@ -1385,8 +1385,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
                rnp->boost_tasks = rnp->gp_tasks;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                t = rnp->boost_kthread_task;
-               if (t != NULL)
-                       wake_up_process(t);
+               if (t)
+                       rcu_wake_cond(t, rnp->boost_kthread_status);
        } else {
                rcu_initiate_boost_trace(rnp);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1403,8 +1403,10 @@ static void invoke_rcu_callbacks_kthread(void)
        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
        if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-           current != __this_cpu_read(rcu_cpu_kthread_task))
-               wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+           current != __this_cpu_read(rcu_cpu_kthread_task)) {
+               rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+                             __this_cpu_read(rcu_cpu_kthread_status));
+       }
        local_irq_restore(flags);
 }
 
@@ -1417,21 +1419,6 @@ static bool rcu_is_callbacks_kthread(void)
        return __get_cpu_var(rcu_cpu_kthread_task) == current;
 }
 
-/*
- * Set the affinity of the boost kthread.  The CPU-hotplug locks are
- * held, so no one should be messing with the existence of the boost
- * kthread.
- */
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-                                          cpumask_var_t cm)
-{
-        struct task_struct *t;
-
-        t = rnp->boost_kthread_task;
-        if (t != NULL)
-                set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
-}
-
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
 
 /*
@@ -1448,15 +1435,19 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * Returns zero if all is well, a negated errno otherwise.
  */
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-                                                 struct rcu_node *rnp,
-                                                 int rnp_index)
+                                                 struct rcu_node *rnp)
 {
+       int rnp_index = rnp - &rsp->node[0];
        unsigned long flags;
        struct sched_param sp;
        struct task_struct *t;
 
        if (&rcu_preempt_state != rsp)
                return 0;
+
+       if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
+               return 0;
+
        rsp->boost = 1;
        if (rnp->boost_kthread_task != NULL)
                return 0;
@@ -1499,20 +1490,6 @@ static void rcu_kthread_do_work(void)
        rcu_preempt_do_callbacks();
 }
 
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-        struct task_struct *t;
-
-        t = rnp->node_kthread_task;
-        if (t != NULL)
-                wake_up_process(t);
-}
-
 /*
  * Set the specified CPU's kthread to run RT or not, as specified by
  * the to_rt argument.  The CPU-hotplug locks are held, so the task
@@ -1537,45 +1514,6 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
        sched_setscheduler_nocheck(t, policy, &sp);
 }
 
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-        struct rcu_node *rnp = rdp->mynode;
-
-        atomic_or(rdp->grpmask, &rnp->wakemask);
-        invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted.  Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-        struct sched_param sp;
-        struct timer_list yield_timer;
-        int prio = current->rt_priority;
-
-        setup_timer_on_stack(&yield_timer, f, arg);
-        mod_timer(&yield_timer, jiffies + 2);
-        sp.sched_priority = 0;
-        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-        set_user_nice(current, 19);
-        schedule();
-        set_user_nice(current, 0);
-        sp.sched_priority = prio;
-        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-        del_timer(&yield_timer);
-}
-
 /*
  * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
  * This can happen while the corresponding CPU is either coming online
@@ -1648,7 +1586,7 @@ static int rcu_cpu_kthread(void *arg)
                if (spincnt > 10) {
                        *statusp = RCU_KTHREAD_YIELDING;
                        trace_rcu_utilization("End CPU kthread@rcu_yield");
-                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       schedule_timeout_interruptible(2);
                        trace_rcu_utilization("Start CPU kthread@rcu_yield");
                        spincnt = 0;
                }
@@ -1704,48 +1642,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        return 0;
 }
 
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed.  We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-        int cpu;
-        unsigned long flags;
-        unsigned long mask;
-        struct rcu_node *rnp = (struct rcu_node *)arg;
-        struct sched_param sp;
-        struct task_struct *t;
-
-        for (;;) {
-                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-                rcu_wait(atomic_read(&rnp->wakemask) != 0);
-                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-                raw_spin_lock_irqsave(&rnp->lock, flags);
-                mask = atomic_xchg(&rnp->wakemask, 0);
-                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-                        if ((mask & 0x1) == 0)
-                                continue;
-                        preempt_disable();
-                        t = per_cpu(rcu_cpu_kthread_task, cpu);
-                        if (!cpu_online(cpu) || t == NULL) {
-                                preempt_enable();
-                                continue;
-                        }
-                        per_cpu(rcu_cpu_has_work, cpu) = 1;
-                        sp.sched_priority = RCU_KTHREAD_PRIO;
-                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                        preempt_enable();
-                }
-        }
-        /* NOTREACHED */
-        rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-        return 0;
-}
-
 /*
  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
  * served by the rcu_node in question.  The CPU hotplug lock is still
@@ -1755,17 +1651,17 @@ static int rcu_node_kthread(void *arg)
  * no outgoing CPU.  If there are no CPUs left in the affinity set,
  * this function allows the kthread to execute on any CPU.
  */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
+       struct task_struct *t = rnp->boost_kthread_task;
+       unsigned long mask = rnp->qsmaskinit;
        cpumask_var_t cm;
        int cpu;
-       unsigned long mask = rnp->qsmaskinit;
 
-       if (rnp->node_kthread_task == NULL)
+       if (!t)
                return;
-       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
                return;
-       cpumask_clear(cm);
        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
                if ((mask & 0x1) && cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
@@ -1775,50 +1671,17 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
                cpumask_clear_cpu(cpu, cm);
                WARN_ON_ONCE(cpumask_weight(cm) == 0);
        }
-       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-       rcu_boost_kthread_setaffinity(rnp, cm);
+       set_cpus_allowed_ptr(t, cm);
        free_cpumask_var(cm);
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held.  So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-                                                struct rcu_node *rnp)
-{
-        unsigned long flags;
-        int rnp_index = rnp - &rsp->node[0];
-        struct sched_param sp;
-        struct task_struct *t;
-
-        if (!rcu_scheduler_fully_active ||
-            rnp->qsmaskinit == 0)
-                return 0;
-        if (rnp->node_kthread_task == NULL) {
-                t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                   "rcun/%d", rnp_index);
-                if (IS_ERR(t))
-                        return PTR_ERR(t);
-                raw_spin_lock_irqsave(&rnp->lock, flags);
-                rnp->node_kthread_task = t;
-                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                sp.sched_priority = 99;
-                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-        }
-        return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
-}
-
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
 static int __init rcu_spawn_kthreads(void)
 {
-       int cpu;
        struct rcu_node *rnp;
+       int cpu;
 
        rcu_scheduler_fully_active = 1;
        for_each_possible_cpu(cpu) {
@@ -1827,10 +1690,10 @@ static int __init rcu_spawn_kthreads(void)
                (void)rcu_spawn_one_cpu_kthread(cpu);
        }
        rnp = rcu_get_root(rcu_state);
-       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
        if (NUM_RCU_NODES > 1) {
                rcu_for_each_leaf_node(rcu_state, rnp)
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+                       (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
        }
        return 0;
 }
@@ -1844,8 +1707,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
        if (rcu_scheduler_fully_active) {
                (void)rcu_spawn_one_cpu_kthread(cpu);
-               if (rnp->node_kthread_task == NULL)
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+               (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
        }
 }
 
@@ -1878,7 +1740,7 @@ static void rcu_stop_cpu_kthread(int cpu)
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
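For context, here is a minimal standalone sketch (plain userspace C, not kernel code) of the conditional-wake pattern this patch introduces with rcu_wake_cond(): a kthread that has marked itself RCU_KTHREAD_YIELDING and gone to sleep in schedule_timeout_interruptible(2) is woken early only when the request comes from the idle task; any other caller lets the short yield run its course. The enum values and helpers below are illustrative stand-ins, not the kernel's own symbols.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the RCU_KTHREAD_* status values used above. */
enum kthread_status {
	KTHREAD_RUNNING,
	KTHREAD_WAITING,
	KTHREAD_YIELDING,
};

/* Stand-in for is_idle_task(current): does the wakeup come from the idle loop? */
static bool caller_is_idle;

/* Stand-in for wake_up_process(): record whether a wakeup was issued. */
static bool woken;

/* Mirrors the rcu_wake_cond() test: a yielding kthread is woken only from idle. */
static void wake_cond(enum kthread_status status)
{
	if (status != KTHREAD_YIELDING || caller_is_idle)
		woken = true;
}

int main(void)
{
	/* A non-idle caller leaves a yielding kthread to finish its short sleep. */
	caller_is_idle = false;
	woken = false;
	wake_cond(KTHREAD_YIELDING);
	printf("non-idle caller, yielding: woken=%d\n", woken);

	/* The idle loop may wake it early, since nothing else wants the CPU. */
	caller_is_idle = true;
	woken = false;
	wake_cond(KTHREAD_YIELDING);
	printf("idle caller, yielding:     woken=%d\n", woken);

	/* A waiting or running kthread is always woken, as before the patch. */
	caller_is_idle = false;
	woken = false;
	wake_cond(KTHREAD_WAITING);
	printf("non-idle caller, waiting:  woken=%d\n", woken);
	return 0;
}
```

Compared with the removed rcu_yield()/timer machinery, the yielding kthread now simply sleeps for two jiffies and resumes on its own, so the per-rcu_node kthread, the on-stack timer, and the priority juggling are no longer needed.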