start_kernel()
 -> rest_init()
     -> kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);

kernel_init()
 -> kernel_init_freeable()
     -> smp_init()
         -> idle_threads_init()
     -> sched_init_smp()
         -> sched_init_numa()
         -> init_sched_domains(cpu_active_mask)
             -> alloc_sched_domains(ndoms_cur = 1)
             -> build_sched_domains(doms_cur[0], NULL)
                 -> __visit_domain_allocation_hell()
                     -> __sdt_alloc(cpu_map)
             -> register_sched_domain_sysctl()
 -> run_init_process()
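For orientation, here is a simplified sketch of init_sched_domains() as it looks in kernels of roughly the 3.x era, tying together the three calls from the chain above (error handling and the fallback_doms path are omitted):

```c
/* Simplified sketch of init_sched_domains(), ~3.x kernels;
 * error handling and the fallback_doms path are omitted.
 */
static int init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;		/* a single system-wide set of domains */
	doms_cur = alloc_sched_domains(ndoms_cur);
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}
```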
struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_domain_shared **__percpu sds;
	struct sched_group **__percpu sg;
	struct sched_group_capacity **__percpu sgc;
};

struct sched_domain_topology_level {
	sched_domain_init_f init;
	sched_domain_mask_f mask;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
};
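Every field of struct sd_data is a per-cpu array of pointers: each topology level owns one sched_domain, sched_group, and capacity object per CPU. A minimal illustration of how one such slot is read (the helper name sd_for_cpu is hypothetical, not a kernel API):

```c
/* Illustration only: sd_for_cpu() is a hypothetical helper, not a
 * kernel API. sdd->sd is a per-cpu slot holding the sched_domain
 * that __sdt_alloc() allocated for this level and CPU.
 */
static struct sched_domain *sd_for_cpu(struct sd_data *sdd, int cpu)
{
	return *per_cpu_ptr(sdd->sd, cpu);
}
```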
...
/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ sd_init_SIBLING, cpu_smt_mask, },
#endif
#ifdef CONFIG_SCHED_MC
	{ sd_init_MC, cpu_coregroup_mask, },
#endif
#ifdef CONFIG_SCHED_BOOK
	{ sd_init_BOOK, cpu_book_mask, },
#endif
	{ sd_init_CPU, cpu_cpu_mask, },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology = default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)
...
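build_sched_domains() walks this list bottom-up for every CPU in the map, chaining each newly built domain on top of the one from the level below. A simplified sketch of that loop (again from the ~3.x code; allocation-failure handling is left out):

```c
/* Simplified from build_sched_domains(), ~3.x kernels: for each CPU,
 * build one sched_domain per topology level, bottom-up, passing the
 * lower level's domain in as the child of the new one.
 */
for_each_cpu(i, cpu_map) {
	struct sched_domain_topology_level *tl;
	struct sched_domain *sd = NULL;

	for_each_sd_topology(tl)
		sd = build_sched_domain(tl, cpu_map, attr, sd, i);
}
```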
- The scheduling domains and scheduling groups of every level are allocated by __sdt_alloc() with kzalloc_node(); see the sketch below.
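A trimmed-down sketch of that allocation loop (the per-cpu arrays for groups/capacity and all error handling are omitted):

```c
/* Trimmed sketch of __sdt_alloc(): one sched_domain per CPU per
 * level, allocated on that CPU's own NUMA node via kzalloc_node().
 */
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					  GFP_KERNEL, cpu_to_node(j));
			*per_cpu_ptr(sdd->sd, j) = sd;
		}
	}
	return 0;
}
```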
- register_sched_domain_sysctl() then creates the following sysctl entries:
/proc/sys/kernel/sched_domain/
├── cpu0
│   ├── domain0
│   │   ├── busy_factor
│   │   ├── busy_idx
│   │   ├── cache_nice_tries
│   │   ├── flags
│   │   ├── forkexec_idx
│   │   ├── idle_idx
│   │   ├── imbalance_pct
│   │   ├── max_interval
│   │   ├── min_interval
│   │   ├── name
│   │   ├── newidle_idx
│   │   └── wake_idx
│   └── domain1
├── cpu1
│   ├── domain0
│   └── domain1
├── cpu2
│   ├── domain0
│   └── domain1
├── cpu3
│   ├── domain0
│   └── domain1
└── cpu4
    ├── domain0
    └── domain1
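With CONFIG_SCHED_DEBUG (and sysctl support) enabled, these entries can be inspected directly: for example, `cat /proc/sys/kernel/sched_domain/cpu0/domain0/name` prints the level name of cpu0's lowest domain (SIBLING, MC, and so on), `flags` shows its SD_* balancing flags, and min_interval/max_interval bound how often load balancing runs in that domain.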