
Commit a445433

Merge pull request #221 from Freax13/aslr
add ASLR
2 parents dc74ce9 + bae5fb8 commit a445433

File tree: 9 files changed, +263 −36 lines

Diff for: Cargo.lock

+45 −2 (generated file; diff not rendered by default)

Diff for: Cargo.toml

+6 −2

```diff
@@ -34,7 +34,7 @@ name = "uefi"
 required-features = ["uefi_bin"]
 
 [dependencies]
-xmas-elf = { version = "0.6.2", optional = true }
+xmas-elf = { version = "0.8.0", optional = true }
 x86_64 = { version = "0.14.7", optional = true, default-features = false, features = ["instructions", "inline_asm"] }
 usize_conversions = { version = "0.2.0", optional = true }
 bit_field = { version = "0.10.0", optional = true }
@@ -51,6 +51,9 @@ json = { version = "0.12.4", optional = true }
 rsdp = { version = "1.0.0", optional = true }
 fatfs = { version = "0.3.4", optional = true }
 gpt = { version = "2.0.0", optional = true }
+raw-cpuid = { version = "10.2.0", optional = true }
+rand = { version = "0.8.4", optional = true, default-features = false }
+rand_chacha = { version = "0.3.1", optional = true, default-features = false }
 
 [dependencies.noto-sans-mono-bitmap]
 version = "0.1.2"
@@ -72,7 +75,8 @@ bios_bin = ["binary", "rsdp"]
 uefi_bin = ["binary", "uefi"]
 binary = [
     "llvm-tools-build", "x86_64", "toml", "xmas-elf", "usize_conversions", "log", "conquer-once",
-    "spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2",
+    "spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2", "raw-cpuid", "rand",
+    "rand_chacha"
 ]
 
 [profile.dev]
```

Diff for: build.rs

+4

```diff
@@ -356,6 +356,8 @@ mod binary {
         pub map_page_table_recursively: bool,
         #[serde(default = "val_true")]
         pub map_framebuffer: bool,
+        #[serde(default)]
+        pub aslr: bool,
         pub kernel_stack_size: Option<AlignedAddress>,
         pub physical_memory_offset: Option<AlignedAddress>,
         pub recursive_index: Option<u16>,
@@ -376,6 +378,7 @@ mod binary {
         let map_physical_memory = self.map_physical_memory;
         let map_page_table_recursively = self.map_page_table_recursively;
         let map_framebuffer = self.map_framebuffer;
+        let aslr = self.aslr;
         let kernel_stack_size = optional(self.kernel_stack_size);
         let physical_memory_offset = optional(self.physical_memory_offset);
         let recursive_index = optional(self.recursive_index);
@@ -389,6 +392,7 @@ mod binary {
             map_physical_memory: #map_physical_memory,
             map_page_table_recursively: #map_page_table_recursively,
             map_framebuffer: #map_framebuffer,
+            aslr: #aslr,
             kernel_stack_size: #kernel_stack_size,
             physical_memory_offset: #physical_memory_offset,
             recursive_index: #recursive_index,
```
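The new `aslr` field is marked `#[serde(default)]`, so it defaults to `false` and existing kernels are unaffected. A kernel would opt in through the bootloader's existing config table in its own Cargo.toml; a minimal sketch, assuming the `[package.metadata.bootloader]` table name used by the bootloader's other config keys:

```toml
# Kernel's Cargo.toml (sketch): the table name is assumed from the
# bootloader's existing configuration mechanism.
[package.metadata.bootloader]
# Randomize where the kernel and other bootloader mappings are placed.
# Omitting the key leaves ASLR disabled (#[serde(default)] => false).
aslr = true
```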

Diff for: src/binary/entropy.rs

+97 (new file)

```rust
use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng};
use raw_cpuid::CpuId;
use x86_64::instructions::{port::Port, random::RdRand};

/// Gather entropy from various sources to seed an RNG.
pub fn build_rng() -> ChaCha20Rng {
    const ENTROPY_SOURCES: [fn() -> [u8; 32]; 3] = [rd_rand_entropy, tsc_entropy, pit_entropy];

    // Collect entropy from different sources and xor them all together.
    let mut seed = [0; 32];
    for entropy_source in ENTROPY_SOURCES {
        let entropy = entropy_source();

        for (seed, entropy) in seed.iter_mut().zip(entropy) {
            *seed ^= entropy;
        }
    }

    // Construct the RNG.
    ChaCha20Rng::from_seed(seed)
}

/// Gather entropy by requesting random numbers with the `RDRAND` instruction if it's available.
///
/// This function provides excellent entropy (unless you don't trust the CPU vendors).
fn rd_rand_entropy() -> [u8; 32] {
    let mut entropy = [0; 32];

    // Check if the CPU supports `RDRAND`.
    if let Some(rd_rand) = RdRand::new() {
        for i in 0..4 {
            if let Some(value) = get_random_64(rd_rand) {
                entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes());
            }
        }
    }

    entropy
}

/// Try to fetch a 64 bit random value with a retry count limit of 10.
///
/// This function is a port of the C implementation provided in Intel's
/// Software Developer's Manual, Volume 1, 7.3.17.1.
fn get_random_64(rd_rand: RdRand) -> Option<u64> {
    const RETRY_LIMIT: u32 = 10;

    for _ in 0..RETRY_LIMIT {
        if let Some(value) = rd_rand.get_u64() {
            return Some(value);
        }
    }

    None
}

/// Gather entropy by reading the current time with the `RDTSC` instruction if it's available.
///
/// This function doesn't provide particularly good entropy, but it's better than nothing.
fn tsc_entropy() -> [u8; 32] {
    let mut entropy = [0; 32];

    // Check if the CPU supports `RDTSC`.
    let cpu_id = CpuId::new();
    if let Some(feature_info) = cpu_id.get_feature_info() {
        if feature_info.has_tsc() {
            for i in 0..4 {
                let value = unsafe {
                    // SAFETY: We checked that the cpu supports `RDTSC` and we run in ring 0.
                    core::arch::x86_64::_rdtsc()
                };

                entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes());
            }
        }
    }

    entropy
}

/// Gather entropy by reading the current count of PIT channels 1-3.
///
/// This function doesn't provide particularly good entropy, but it's always available.
fn pit_entropy() -> [u8; 32] {
    let mut entropy = [0; 32];

    for (i, entropy_byte) in entropy.iter_mut().enumerate() {
        // Cycle through channels 1-3.
        let channel = i % 3;

        let mut port = Port::<u8>::new(0x40 + channel as u16);
        let value = unsafe {
            // SAFETY: It's safe to read from ports 0x40-0x42.
            port.read()
        };

        *entropy_byte = value;
    }

    entropy
}
```
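Because the three sources are XOR-ed into one seed, the result is at least as unpredictable as the strongest available source, even if the weaker ones are nearly constant. The seeded generator is then used like any other `rand` RNG; a minimal, hypothetical usage sketch (the helper name and the `rand::Rng` import are illustrative, not part of this diff):

```rust
use rand::Rng;

use crate::binary::entropy;

/// Hypothetical helper: draw one 64-bit value from the bootloader's seeded RNG.
fn sample_u64() -> u64 {
    // ChaCha20Rng seeded by XOR-ing the RDRAND, RDTSC, and PIT readings together.
    let mut rng = entropy::build_rng();
    rng.gen()
}
```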

Diff for: src/binary/level_4_entries.rs

+53 −13

```diff
@@ -1,4 +1,9 @@
 use core::{alloc::Layout, convert::TryInto};
+use rand::{
+    distributions::{Distribution, Uniform},
+    seq::IteratorRandom,
+};
+use rand_chacha::ChaCha20Rng;
 use usize_conversions::IntoUsize;
 use x86_64::{
     structures::paging::{Page, PageTableIndex, Size4KiB},
@@ -7,15 +12,19 @@ use x86_64::{
 use xmas_elf::program::ProgramHeader;
 
 use crate::{
-    binary::{MemoryRegion, CONFIG},
+    binary::{entropy, MemoryRegion, CONFIG},
     BootInfo,
 };
 
 /// Keeps track of used entries in a level 4 page table.
 ///
 /// Useful for determining a free virtual memory block, e.g. for mapping additional data.
 pub struct UsedLevel4Entries {
-    entry_state: [bool; 512], // whether an entry is in use by the kernel
+    /// Whether an entry is in use by the kernel.
+    entry_state: [bool; 512],
+    /// A random number generator that should be used to generate random addresses or
+    /// `None` if ASLR is disabled.
+    rng: Option<ChaCha20Rng>,
 }
 
 impl UsedLevel4Entries {
@@ -25,6 +34,7 @@ impl UsedLevel4Entries {
     pub fn new(max_phys_addr: PhysAddr, regions_len: usize, framebuffer_size: usize) -> Self {
         let mut used = UsedLevel4Entries {
             entry_state: [false; 512],
+            rng: CONFIG.aslr.then(entropy::build_rng),
         };
 
         used.entry_state[0] = true; // TODO: Can we do this dynamically?
@@ -99,28 +109,58 @@ impl UsedLevel4Entries {
         }
     }
 
-    /// Returns an unused level 4 entry and marks it as used.
+    /// Returns an unused level 4 entry and marks it as used. If `CONFIG.aslr` is
+    /// enabled, this will return a random available entry.
     ///
     /// Since this method marks each returned index as used, it can be used multiple times
     /// to determine multiple unused virtual memory regions.
     pub fn get_free_entry(&mut self) -> PageTableIndex {
-        let (idx, entry) = self
+        // Create an iterator over all available p4 indices.
+        let mut free_entries = self
             .entry_state
-            .iter_mut()
+            .iter()
+            .copied()
             .enumerate()
-            .find(|(_, &mut entry)| entry == false)
-            .expect("no usable level 4 entries found");
+            .filter(|(_, used)| !used)
+            .map(|(idx, _)| idx);
+
+        // Choose the free entry index.
+        let idx_opt = if let Some(rng) = self.rng.as_mut() {
+            // Randomly choose an index.
+            free_entries.choose(rng)
+        } else {
+            // Choose the first index.
+            free_entries.next()
+        };
+        let idx = idx_opt.expect("no usable level 4 entry found");
+
+        // Mark the entry as used.
+        self.entry_state[idx] = true;
 
-        *entry = true;
         PageTableIndex::new(idx.try_into().unwrap())
     }
 
-    /// Returns the virtual start address of an unused level 4 entry and marks it as used.
+    /// Returns a virtual address in an unused level 4 entry and marks it as used.
     ///
-    /// This is a convenience method around [`get_free_entry`], so all of its docs applies here
+    /// This function calls [`get_free_entry`] internally, so all of its docs apply here
     /// too.
-    pub fn get_free_address(&mut self) -> VirtAddr {
-        Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
-            .start_address()
+    pub fn get_free_address(&mut self, size: u64, alignment: u64) -> VirtAddr {
+        assert!(alignment.is_power_of_two());
+
+        let base =
+            Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
+                .start_address();
+
+        let offset = if let Some(rng) = self.rng.as_mut() {
+            // Choose a random offset.
+            const LEVEL_4_SIZE: u64 = 4096 * 512 * 512 * 512;
+            let end = LEVEL_4_SIZE - size;
+            let uniform_range = Uniform::from(0..end / alignment);
+            uniform_range.sample(rng) * alignment
+        } else {
+            0
+        };
+
+        base + offset
    }
 }
```
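With ASLR enabled, `get_free_address` now randomizes twice: `get_free_entry` picks a random unused level 4 entry (each covers 4096 × 512 × 512 × 512 bytes = 512 GiB of virtual address space), and a random `alignment`-multiple offset is added so that `size` bytes still fit inside that region. A standalone sketch of just the offset math, with a fixed seed and made-up arguments purely for illustration:

```rust
use rand::{
    distributions::{Distribution, Uniform},
    SeedableRng,
};
use rand_chacha::ChaCha20Rng;

/// Illustrative re-implementation of the offset calculation in `get_free_address`.
fn random_offset(rng: &mut ChaCha20Rng, size: u64, alignment: u64) -> u64 {
    assert!(alignment.is_power_of_two());

    // One level 4 entry maps 4096 * 512 * 512 * 512 bytes = 512 GiB of virtual memory.
    const LEVEL_4_SIZE: u64 = 4096 * 512 * 512 * 512;

    // Leave room for `size` bytes at the end and keep the result a multiple of `alignment`.
    let end = LEVEL_4_SIZE - size;
    Uniform::from(0..end / alignment).sample(rng) * alignment
}

fn main() {
    // Fixed seed purely for demonstration; the bootloader seeds from hardware entropy.
    let mut rng = ChaCha20Rng::from_seed([0; 32]);

    // E.g. a 16 MiB image with 4 KiB alignment.
    let offset = random_offset(&mut rng, 16 * 1024 * 1024, 4096);
    assert_eq!(offset % 4096, 0);
    println!("offset into the chosen 512 GiB region: {:#x}", offset);
}
```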

Diff for: src/binary/load_kernel.rs

+17 −5

```diff
@@ -52,12 +52,28 @@ where
         }
 
         let elf_file = ElfFile::new(bytes)?;
+        for program_header in elf_file.program_iter() {
+            program::sanity_check(program_header, &elf_file)?;
+        }
 
         let virtual_address_offset = match elf_file.header.pt2.type_().as_type() {
             header::Type::None => unimplemented!(),
             header::Type::Relocatable => unimplemented!(),
             header::Type::Executable => 0,
-            header::Type::SharedObject => used_entries.get_free_address().as_u64(),
+            header::Type::SharedObject => {
+                // Find the highest virtual memory address and the biggest alignment.
+                let load_program_headers = elf_file
+                    .program_iter()
+                    .filter(|h| matches!(h.get_type(), Ok(Type::Load)));
+                let size = load_program_headers
+                    .clone()
+                    .map(|h| h.virtual_addr() + h.mem_size())
+                    .max()
+                    .unwrap_or(0);
+                let align = load_program_headers.map(|h| h.align()).max().unwrap_or(1);
+
+                used_entries.get_free_address(size, align).as_u64()
+            }
             header::Type::Core => unimplemented!(),
             header::Type::ProcessorSpecific(_) => unimplemented!(),
         };
@@ -79,10 +95,6 @@ where
     }
 
     fn load_segments(&mut self) -> Result<Option<TlsTemplate>, &'static str> {
-        for program_header in self.elf_file.program_iter() {
-            program::sanity_check(program_header, &self.elf_file)?;
-        }
-
         // Load the segments into virtual memory.
         let mut tls_template = None;
         for program_header in self.elf_file.program_iter() {
```
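For a position-independent (`SharedObject`) kernel, the loader thus measures how much address space the image needs and how strictly it must be aligned before asking for a random base. A worked example of that size/alignment computation with made-up segment values (not taken from any real kernel):

```rust
fn main() {
    // Hypothetical LOAD segments: (virtual_addr, mem_size, align).
    let load_segments = [
        (0x20_0000u64, 0x8_0000u64, 0x1000u64),
        (0x40_0000, 0x12_3000, 0x20_0000),
    ];

    // Highest end address of any segment: the amount of address space the image needs.
    let size = load_segments
        .iter()
        .map(|(addr, mem_size, _)| addr + mem_size)
        .max()
        .unwrap_or(0);

    // Strictest (largest) alignment requirement of any segment.
    let align = load_segments
        .iter()
        .map(|(_, _, align)| *align)
        .max()
        .unwrap_or(1);

    // The bootloader then calls used_entries.get_free_address(size, align) and adds
    // the returned random base to every segment's original virtual address.
    assert_eq!(size, 0x52_3000);
    assert_eq!(align, 0x20_0000);
    println!("size = {:#x}, align = {:#x}", size, align);
}
```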
