@@ -1241,6 +1241,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+        /*
+         * If the thread is yielding, only wake it when this
+         * is invoked from idle
+         */
+        if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+                wake_up_process(t);
+}
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1312,17 +1322,6 @@ static int rcu_boost(struct rcu_node *rnp)
                ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
 
-/*
- * Timer handler to initiate waking up of boost kthreads that
- * have yielded the CPU due to excessive numbers of tasks to
- * boost.  We wake up the per-rcu_node kthread, which in turn
- * will wake up the booster kthread.
- */
-static void rcu_boost_kthread_timer(unsigned long arg)
-{
-        invoke_rcu_node_kthread((struct rcu_node *)arg);
-}
-
 /*
  * Priority-boosting kthread.  One per leaf rcu_node and one for the
  * root rcu_node.
@@ -1346,8 +1345,9 @@ static int rcu_boost_kthread(void *arg)
                 else
                         spincnt = 0;
                 if (spincnt > 10) {
+                        rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
                         trace_rcu_utilization("End boost kthread@rcu_yield");
-                        rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+                        schedule_timeout_interruptible(2);
                         trace_rcu_utilization("Start boost kthread@rcu_yield");
                         spincnt = 0;
                 }
@@ -1385,8 +1385,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
                 rnp->boost_tasks = rnp->gp_tasks;
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 t = rnp->boost_kthread_task;
-                if (t != NULL)
-                        wake_up_process(t);
+                if (t)
+                        rcu_wake_cond(t, rnp->boost_kthread_status);
         } else {
                 rcu_initiate_boost_trace(rnp);
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1403,8 +1403,10 @@ static void invoke_rcu_callbacks_kthread(void)
         local_irq_save(flags);
         __this_cpu_write(rcu_cpu_has_work, 1);
         if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-            current != __this_cpu_read(rcu_cpu_kthread_task))
-                wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+            current != __this_cpu_read(rcu_cpu_kthread_task)) {
+                rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+                              __this_cpu_read(rcu_cpu_kthread_status));
+        }
         local_irq_restore(flags);
 }
 
@@ -1417,21 +1419,6 @@ static bool rcu_is_callbacks_kthread(void)
         return __get_cpu_var(rcu_cpu_kthread_task) == current;
 }
 
-/*
- * Set the affinity of the boost kthread.  The CPU-hotplug locks are
- * held, so no one should be messing with the existence of the boost
- * kthread.
- */
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-                                          cpumask_var_t cm)
-{
-        struct task_struct *t;
-
-        t = rnp->boost_kthread_task;
-        if (t != NULL)
-                set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
-}
-
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
 
 /*
@@ -1448,15 +1435,19 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * Returns zero if all is well, a negated errno otherwise.
  */
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-                                                 struct rcu_node *rnp,
-                                                 int rnp_index)
+                                                 struct rcu_node *rnp)
 {
+        int rnp_index = rnp - &rsp->node[0];
         unsigned long flags;
         struct sched_param sp;
         struct task_struct *t;
 
         if (&rcu_preempt_state != rsp)
                 return 0;
+
+        if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
+                return 0;
+
         rsp->boost = 1;
         if (rnp->boost_kthread_task != NULL)
                 return 0;
@@ -1499,20 +1490,6 @@ static void rcu_kthread_do_work(void)
         rcu_preempt_do_callbacks();
 }
 
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-        struct task_struct *t;
-
-        t = rnp->node_kthread_task;
-        if (t != NULL)
-                wake_up_process(t);
-}
-
 /*
  * Set the specified CPU's kthread to run RT or not, as specified by
  * the to_rt argument.  The CPU-hotplug locks are held, so the task
@@ -1537,45 +1514,6 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
         sched_setscheduler_nocheck(t, policy, &sp);
 }
 
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-        struct rcu_node *rnp = rdp->mynode;
-
-        atomic_or(rdp->grpmask, &rnp->wakemask);
-        invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted.  Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-        struct sched_param sp;
-        struct timer_list yield_timer;
-        int prio = current->rt_priority;
-
-        setup_timer_on_stack(&yield_timer, f, arg);
-        mod_timer(&yield_timer, jiffies + 2);
-        sp.sched_priority = 0;
-        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-        set_user_nice(current, 19);
-        schedule();
-        set_user_nice(current, 0);
-        sp.sched_priority = prio;
-        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-        del_timer(&yield_timer);
-}
-
 /*
  * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
  * This can happen while the corresponding CPU is either coming online
@@ -1648,7 +1586,7 @@ static int rcu_cpu_kthread(void *arg)
                 if (spincnt > 10) {
                         *statusp = RCU_KTHREAD_YIELDING;
                         trace_rcu_utilization("End CPU kthread@rcu_yield");
-                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                        schedule_timeout_interruptible(2);
                         trace_rcu_utilization("Start CPU kthread@rcu_yield");
                         spincnt = 0;
                 }
@@ -1704,48 +1642,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
         return 0;
 }
 
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed.  We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-        int cpu;
-        unsigned long flags;
-        unsigned long mask;
-        struct rcu_node *rnp = (struct rcu_node *)arg;
-        struct sched_param sp;
-        struct task_struct *t;
-
-        for (;;) {
-                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-                rcu_wait(atomic_read(&rnp->wakemask) != 0);
-                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-                raw_spin_lock_irqsave(&rnp->lock, flags);
-                mask = atomic_xchg(&rnp->wakemask, 0);
-                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-                        if ((mask & 0x1) == 0)
-                                continue;
-                        preempt_disable();
-                        t = per_cpu(rcu_cpu_kthread_task, cpu);
-                        if (!cpu_online(cpu) || t == NULL) {
-                                preempt_enable();
-                                continue;
-                        }
-                        per_cpu(rcu_cpu_has_work, cpu) = 1;
-                        sp.sched_priority = RCU_KTHREAD_PRIO;
-                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                        preempt_enable();
-                }
-        }
-        /* NOTREACHED */
-        rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-        return 0;
-}
-
 /*
  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
  * served by the rcu_node in question. The CPU hotplug lock is still
@@ -1755,17 +1651,17 @@ static int rcu_node_kthread(void *arg)
  * no outgoing CPU.  If there are no CPUs left in the affinity set,
  * this function allows the kthread to execute on any CPU.
  */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
+        struct task_struct *t = rnp->boost_kthread_task;
+        unsigned long mask = rnp->qsmaskinit;
         cpumask_var_t cm;
         int cpu;
-        unsigned long mask = rnp->qsmaskinit;
 
-        if (rnp->node_kthread_task == NULL)
+        if (!t)
                 return;
-        if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
                 return;
-        cpumask_clear(cm);
         for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
                 if ((mask & 0x1) && cpu != outgoingcpu)
                         cpumask_set_cpu(cpu, cm);
@@ -1775,50 +1671,17 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
                 cpumask_clear_cpu(cpu, cm);
                 WARN_ON_ONCE(cpumask_weight(cm) == 0);
         }
-        set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-        rcu_boost_kthread_setaffinity(rnp, cm);
+        set_cpus_allowed_ptr(t, cm);
         free_cpumask_var(cm);
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held.  So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-                                                struct rcu_node *rnp)
-{
-        unsigned long flags;
-        int rnp_index = rnp - &rsp->node[0];
-        struct sched_param sp;
-        struct task_struct *t;
-
-        if (!rcu_scheduler_fully_active ||
-            rnp->qsmaskinit == 0)
-                return 0;
-        if (rnp->node_kthread_task == NULL) {
-                t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                   "rcun/%d", rnp_index);
-                if (IS_ERR(t))
-                        return PTR_ERR(t);
-                raw_spin_lock_irqsave(&rnp->lock, flags);
-                rnp->node_kthread_task = t;
-                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                sp.sched_priority = 99;
-                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-        }
-        return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
-}
-
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
 static int __init rcu_spawn_kthreads(void)
 {
-        int cpu;
         struct rcu_node *rnp;
+        int cpu;
 
         rcu_scheduler_fully_active = 1;
         for_each_possible_cpu(cpu) {
@@ -1827,10 +1690,10 @@ static int __init rcu_spawn_kthreads(void)
                 (void)rcu_spawn_one_cpu_kthread(cpu);
         }
         rnp = rcu_get_root(rcu_state);
-        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+        (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
         if (NUM_RCU_NODES > 1) {
                 rcu_for_each_leaf_node(rcu_state, rnp)
-                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+                        (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
         }
         return 0;
 }
@@ -1844,8 +1707,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
         /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
         if (rcu_scheduler_fully_active) {
                 (void)rcu_spawn_one_cpu_kthread(cpu);
-                if (rnp->node_kthread_task == NULL)
-                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+                (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
         }
 }
 
@@ -1878,7 +1740,7 @@ static void rcu_stop_cpu_kthread(int cpu)
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }