 bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 
 /* Dumb Scheduling */
-#if defined(CONFIG_SCHED_DUMB)
-#define _priq_run_init		z_priq_dumb_init
-#define _priq_run_add		z_priq_dumb_add
-#define _priq_run_remove	z_priq_dumb_remove
-#define _priq_run_yield		z_priq_dumb_yield
+#if defined(CONFIG_SCHED_SIMPLE)
+#define _priq_run_init		z_priq_simple_init
+#define _priq_run_add		z_priq_simple_add
+#define _priq_run_remove	z_priq_simple_remove
+#define _priq_run_yield		z_priq_simple_yield
 #if defined(CONFIG_SCHED_CPU_MASK)
-#define _priq_run_best		z_priq_dumb_mask_best
+#define _priq_run_best		z_priq_simple_mask_best
 #else
-#define _priq_run_best		z_priq_dumb_best
+#define _priq_run_best		z_priq_simple_best
 #endif /* CONFIG_SCHED_CPU_MASK */
 /* Scalable Scheduling */
 #elif defined(CONFIG_SCHED_SCALABLE)
@@ -45,10 +45,10 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 #define _priq_wait_remove	z_priq_rb_remove
 #define _priq_wait_best		z_priq_rb_best
 /* Dumb Wait Queue */
-#elif defined(CONFIG_WAITQ_DUMB)
-#define _priq_wait_add		z_priq_dumb_add
-#define _priq_wait_remove	z_priq_dumb_remove
-#define _priq_wait_best		z_priq_dumb_best
+#elif defined(CONFIG_WAITQ_SIMPLE)
+#define _priq_wait_add		z_priq_simple_add
+#define _priq_wait_remove	z_priq_simple_remove
+#define _priq_wait_best		z_priq_simple_best
 #endif
 
 #if defined(CONFIG_64BIT)
@@ -59,7 +59,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
 #define TRAILING_ZEROS u32_count_trailing_zeros
 #endif /* CONFIG_64BIT */
 
-static ALWAYS_INLINE void z_priq_dumb_init(sys_dlist_t *pq)
+static ALWAYS_INLINE void z_priq_simple_init(sys_dlist_t *pq)
 {
 	sys_dlist_init(pq);
 }
@@ -105,7 +105,7 @@ static ALWAYS_INLINE int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct
 	return 0;
 }
 
-static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
+static ALWAYS_INLINE void z_priq_simple_add(sys_dlist_t *pq, struct k_thread *thread)
 {
 	struct k_thread *t;
 
@@ -119,14 +119,14 @@ static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thre
 	sys_dlist_append(pq, &thread->base.qnode_dlist);
 }
 
-static ALWAYS_INLINE void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
+static ALWAYS_INLINE void z_priq_simple_remove(sys_dlist_t *pq, struct k_thread *thread)
 {
 	ARG_UNUSED(pq);
 
 	sys_dlist_remove(&thread->base.qnode_dlist);
 }
 
-static ALWAYS_INLINE void z_priq_dumb_yield(sys_dlist_t *pq)
+static ALWAYS_INLINE void z_priq_simple_yield(sys_dlist_t *pq)
 {
 #ifndef CONFIG_SMP
 	sys_dnode_t *n;
@@ -157,7 +157,7 @@ static ALWAYS_INLINE void z_priq_dumb_yield(sys_dlist_t *pq)
 #endif
 }
 
-static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
+static ALWAYS_INLINE struct k_thread *z_priq_simple_best(sys_dlist_t *pq)
 {
 	struct k_thread *thread = NULL;
 	sys_dnode_t *n = sys_dlist_peek_head(pq);
@@ -169,7 +169,7 @@ static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
 }
 
 #ifdef CONFIG_SCHED_CPU_MASK
-static ALWAYS_INLINE struct k_thread *z_priq_dumb_mask_best(sys_dlist_t *pq)
+static ALWAYS_INLINE struct k_thread *z_priq_simple_mask_best(sys_dlist_t *pq)
 {
 	/* With masks enabled we need to be prepared to walk the list
 	 * looking for one we can run
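For orientation, here is a minimal standalone sketch of the linked-list priority-queue pattern that the renamed z_priq_simple_add()/z_priq_simple_best() helpers implement: insert with a linear scan so the list stays sorted by priority, and take the head as the best runnable candidate. This is not Zephyr code and not part of this commit; the simple_pq_* names and types are hypothetical stand-ins for sys_dlist_t and struct k_thread.

/* Illustrative sketch only: a singly linked list used as a priority queue.
 * As in Zephyr, a lower numeric priority value is treated as more urgent.
 */
#include <stdio.h>

struct simple_pq_node {
	struct simple_pq_node *next;
	int prio;                      /* lower value == more urgent */
	const char *name;
};

/* Insert before the first node with a strictly larger priority value, so
 * equal-priority nodes keep FIFO order; falls through to the tail otherwise.
 */
static void simple_pq_add(struct simple_pq_node **head, struct simple_pq_node *t)
{
	while (*head != NULL && (*head)->prio <= t->prio) {
		head = &(*head)->next;
	}
	t->next = *head;
	*head = t;
}

/* The best candidate is always the list head (NULL if the queue is empty). */
static struct simple_pq_node *simple_pq_best(struct simple_pq_node *head)
{
	return head;
}

int main(void)
{
	struct simple_pq_node a = { .prio = 5, .name = "worker" };
	struct simple_pq_node b = { .prio = 1, .name = "urgent" };
	struct simple_pq_node *pq = NULL;

	simple_pq_add(&pq, &a);
	simple_pq_add(&pq, &b);
	printf("best: %s\n", simple_pq_best(pq)->name);  /* prints "urgent" */
	return 0;
}

The trade-off this rename does not change: adding is O(n) in queue length and picking the best thread is O(1), which is why the Kconfig help still positions this backend for small thread counts next to the scalable red-black-tree option (CONFIG_SCHED_SCALABLE).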