@@ -862,7 +862,8 @@ static DEFINE_MUTEX(scx_ops_enable_mutex);
 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
-static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
+static int scx_ops_bypass_depth;
+static DEFINE_RAW_SPINLOCK(__scx_ops_bypass_lock);
 static bool scx_ops_init_task_enabled;
 static bool scx_switching_all;
 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
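(This first hunk replaces the atomic nesting counter with a plain int guarded by a new raw spinlock. atomic_inc_return()/atomic_dec_return() only made the counter arithmetic atomic; the bypass work triggered on the 0<->1 transitions still ran outside any serialization, so presumably two racing enable/disable calls could interleave. As the hunks below show, the lock is instead held across all of scx_ops_bypass(), making the depth check and the transition work one critical section; a short userspace sketch of the pattern follows the last hunk.)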
@@ -4298,18 +4299,20 @@ bool task_should_scx(struct task_struct *p)
  */
 static void scx_ops_bypass(bool bypass)
 {
-	int depth, cpu;
+	int cpu;
+	unsigned long flags;
 
+	raw_spin_lock_irqsave(&__scx_ops_bypass_lock, flags);
 	if (bypass) {
-		depth = atomic_inc_return(&scx_ops_bypass_depth);
-		WARN_ON_ONCE(depth <= 0);
-		if (depth != 1)
-			return;
+		scx_ops_bypass_depth++;
+		WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
+		if (scx_ops_bypass_depth != 1)
+			goto unlock;
 	} else {
-		depth = atomic_dec_return(&scx_ops_bypass_depth);
-		WARN_ON_ONCE(depth < 0);
-		if (depth != 0)
-			return;
+		scx_ops_bypass_depth--;
+		WARN_ON_ONCE(scx_ops_bypass_depth < 0);
+		if (scx_ops_bypass_depth != 0)
+			goto unlock;
 	}
 
 	/*
@@ -4326,7 +4329,7 @@ static void scx_ops_bypass(bool bypass)
 		struct rq_flags rf;
 		struct task_struct *p, *n;
 
-		rq_lock_irqsave(rq, &rf);
+		rq_lock(rq, &rf);
 
 		if (bypass) {
 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
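(Because __scx_ops_bypass_lock is taken with raw_spin_lock_irqsave(), interrupts are already disabled throughout scx_ops_bypass(), so the per-CPU rq lock no longer needs its own IRQ save/restore; this hunk and the next switch rq_lock_irqsave()/rq_unlock_irqrestore() to plain rq_lock()/rq_unlock() accordingly.)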
@@ -4362,11 +4365,13 @@ static void scx_ops_bypass(bool bypass)
 			sched_enq_and_set_task(&ctx);
 		}
 
-		rq_unlock_irqrestore(rq, &rf);
+		rq_unlock(rq, &rf);
 
 		/* resched to restore ticks and idle state */
 		resched_cpu(cpu);
 	}
+unlock:
+	raw_spin_unlock_irqrestore(&__scx_ops_bypass_lock, flags);
 }
 
 static void free_exit_info(struct scx_exit_info *ei)
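Outside the kernel, the pattern is easy to demonstrate. Below is a minimal userspace sketch, assuming a pthread mutex as a stand-in for the raw, IRQ-disabling spinlock; the names bypass_depth, bypass_lock, apply_bypass, and ops_bypass are hypothetical, not from the kernel source.

/*
 * Hypothetical userspace sketch of the pattern in the patch: a
 * nesting-depth counter guarded by one lock, so that the first
 * enable and the last disable run their side effects atomically
 * with the depth update.  Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bypass_lock = PTHREAD_MUTEX_INITIALIZER;
static int bypass_depth;

/* Stand-in for the per-CPU work scx_ops_bypass() does on 0<->1. */
static void apply_bypass(bool bypass)
{
	printf("bypass %s\n", bypass ? "engaged" : "released");
}

static void ops_bypass(bool bypass)
{
	pthread_mutex_lock(&bypass_lock);
	if (bypass) {
		/* Only the 0 -> 1 transition performs the real work. */
		if (++bypass_depth == 1)
			apply_bypass(true);
	} else {
		/* Only the 1 -> 0 transition undoes it. */
		if (--bypass_depth == 0)
			apply_bypass(false);
	}
	/*
	 * Unlike the atomic_inc_return() version, the depth check and
	 * the side effects form a single critical section, so two
	 * threads toggling bypass cannot interleave between them.
	 */
	pthread_mutex_unlock(&bypass_lock);
}

int main(void)
{
	ops_bypass(true);	/* engages */
	ops_bypass(true);	/* nested, no-op */
	ops_bypass(false);	/* still nested, no-op */
	ops_bypass(false);	/* releases */
	return 0;
}

Folding the depth check and the transition work into one critical section is exactly what the atomic counter could not provide: atomicity of the whole enable/disable step rather than of the arithmetic alone.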