Skip to content

Commit

Permalink
Merge pull request #221 from Freax13/aslr
Browse files Browse the repository at this point in the history
add ASLR
  • Loading branch information
phil-opp authored Feb 6, 2022
2 parents dc74ce9 + bae5fb8 commit a445433
Show file tree
Hide file tree
Showing 9 changed files with 263 additions and 36 deletions.
47 changes: 45 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 6 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ name = "uefi"
required-features = ["uefi_bin"]

[dependencies]
xmas-elf = { version = "0.6.2", optional = true }
xmas-elf = { version = "0.8.0", optional = true }
x86_64 = { version = "0.14.7", optional = true, default-features = false, features = ["instructions", "inline_asm"] }
usize_conversions = { version = "0.2.0", optional = true }
bit_field = { version = "0.10.0", optional = true }
Expand All @@ -51,6 +51,9 @@ json = { version = "0.12.4", optional = true }
rsdp = { version = "1.0.0", optional = true }
fatfs = { version = "0.3.4", optional = true }
gpt = { version = "2.0.0", optional = true }
raw-cpuid = { version = "10.2.0", optional = true }
rand = { version = "0.8.4", optional = true, default-features = false }
rand_chacha = { version = "0.3.1", optional = true, default-features = false }

[dependencies.noto-sans-mono-bitmap]
version = "0.1.2"
Expand All @@ -72,7 +75,8 @@ bios_bin = ["binary", "rsdp"]
uefi_bin = ["binary", "uefi"]
binary = [
"llvm-tools-build", "x86_64", "toml", "xmas-elf", "usize_conversions", "log", "conquer-once",
"spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2",
"spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2", "raw-cpuid", "rand",
"rand_chacha"
]

[profile.dev]
Expand Down
4 changes: 4 additions & 0 deletions build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -356,6 +356,8 @@ mod binary {
pub map_page_table_recursively: bool,
#[serde(default = "val_true")]
pub map_framebuffer: bool,
#[serde(default)]
pub aslr: bool,
pub kernel_stack_size: Option<AlignedAddress>,
pub physical_memory_offset: Option<AlignedAddress>,
pub recursive_index: Option<u16>,
Expand All @@ -376,6 +378,7 @@ mod binary {
let map_physical_memory = self.map_physical_memory;
let map_page_table_recursively = self.map_page_table_recursively;
let map_framebuffer = self.map_framebuffer;
let aslr = self.aslr;
let kernel_stack_size = optional(self.kernel_stack_size);
let physical_memory_offset = optional(self.physical_memory_offset);
let recursive_index = optional(self.recursive_index);
Expand All @@ -389,6 +392,7 @@ mod binary {
map_physical_memory: #map_physical_memory,
map_page_table_recursively: #map_page_table_recursively,
map_framebuffer: #map_framebuffer,
aslr: #aslr,
kernel_stack_size: #kernel_stack_size,
physical_memory_offset: #physical_memory_offset,
recursive_index: #recursive_index,
Expand Down
97 changes: 97 additions & 0 deletions src/binary/entropy.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng};
use raw_cpuid::CpuId;
use x86_64::instructions::{port::Port, random::RdRand};

/// Gather entropy from various sources to seed a RNG.
pub fn build_rng() -> ChaCha20Rng {
    const ENTROPY_SOURCES: [fn() -> [u8; 32]; 3] = [rd_rand_entropy, tsc_entropy, pit_entropy];

    // XOR the output of every entropy source together to build the seed, so
    // the result is at least as unpredictable as the best individual source.
    let seed = ENTROPY_SOURCES
        .iter()
        .map(|entropy_source| entropy_source())
        .fold([0u8; 32], |mut acc, entropy| {
            for (acc_byte, entropy_byte) in acc.iter_mut().zip(entropy) {
                *acc_byte ^= entropy_byte;
            }
            acc
        });

    // Construct the RNG.
    ChaCha20Rng::from_seed(seed)
}

/// Gather entropy by requesting random numbers with `RDRAND` instruction if it's available.
///
/// This function provides excellent entropy (unless you don't trust the CPU vendors).
fn rd_rand_entropy() -> [u8; 32] {
    let mut entropy = [0; 32];

    // `RdRand::new` returns `None` when the CPU doesn't support `RDRAND`.
    if let Some(rd_rand) = RdRand::new() {
        // Fill the buffer 8 bytes at a time; 32 bytes -> exactly 4 chunks.
        for chunk in entropy.chunks_exact_mut(8) {
            // If fetching fails even after retries, the chunk simply stays zeroed.
            if let Some(value) = get_random_64(rd_rand) {
                chunk.copy_from_slice(&value.to_ne_bytes());
            }
        }
    }

    entropy
}

/// Try to fetch a 64 bit random value with a retry count limit of 10.
///
/// This function is a port of the C implementation provided in Intel's Software Developer's Manual, Volume 1, 7.3.17.1.
fn get_random_64(rd_rand: RdRand) -> Option<u64> {
    const RETRY_LIMIT: u32 = 10;

    // `get_u64` can fail spuriously; retry a bounded number of times and
    // return the first success, or `None` if every attempt failed.
    (0..RETRY_LIMIT).find_map(|_| rd_rand.get_u64())
}

/// Gather entropy by reading the current time with the `RDTSC` instruction if it's available.
///
/// This function doesn't provide particularly good entropy, but it's better than nothing.
fn tsc_entropy() -> [u8; 32] {
    let mut entropy = [0; 32];

    // Check if the CPU supports `RDTSC`.
    let cpu_id = CpuId::new();
    if let Some(feature_info) = cpu_id.get_feature_info() {
        // FIX: the check was previously inverted (`!feature_info.has_tsc()`),
        // which read the time stamp counter only on CPUs that do NOT support
        // `RDTSC` — contradicting the SAFETY comment below and making the
        // unsafe intrinsic invalid on exactly those CPUs.
        if feature_info.has_tsc() {
            for i in 0..4 {
                let value = unsafe {
                    // SAFETY: We checked that the cpu supports `RDTSC` and we run in ring 0.
                    core::arch::x86_64::_rdtsc()
                };
                entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes());
            }
        }
    }

    entropy
}

/// Gather entropy by reading the current count of PIT channel 1-3.
///
/// This function doesn't provide particularly good entropy, but it's always available.
fn pit_entropy() -> [u8; 32] {
    let mut entropy = [0; 32];

    for (i, entropy_byte) in entropy.iter_mut().enumerate() {
        // Cycle through the data ports of channels 1-3 (ports 0x40-0x42).
        let port_number = 0x40 + (i % 3) as u16;
        let mut port = Port::<u8>::new(port_number);

        let value = unsafe {
            // SAFETY: It's safe to read from ports 0x40-0x42.
            port.read()
        };

        *entropy_byte = value;
    }

    entropy
}
66 changes: 53 additions & 13 deletions src/binary/level_4_entries.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
use core::{alloc::Layout, convert::TryInto};
use rand::{
distributions::{Distribution, Uniform},
seq::IteratorRandom,
};
use rand_chacha::ChaCha20Rng;
use usize_conversions::IntoUsize;
use x86_64::{
structures::paging::{Page, PageTableIndex, Size4KiB},
Expand All @@ -7,15 +12,19 @@ use x86_64::{
use xmas_elf::program::ProgramHeader;

use crate::{
binary::{MemoryRegion, CONFIG},
binary::{entropy, MemoryRegion, CONFIG},
BootInfo,
};

/// Keeps track of used entries in a level 4 page table.
///
/// Useful for determining a free virtual memory block, e.g. for mapping additional data.
pub struct UsedLevel4Entries {
    /// Whether an entry is in use by the kernel, indexed by the level 4
    /// page table index (0..512).
    entry_state: [bool; 512],
    /// A random number generator that should be used to generate random addresses or
    /// `None` if ASLR is disabled.
    rng: Option<ChaCha20Rng>,
}

impl UsedLevel4Entries {
Expand All @@ -25,6 +34,7 @@ impl UsedLevel4Entries {
pub fn new(max_phys_addr: PhysAddr, regions_len: usize, framebuffer_size: usize) -> Self {
let mut used = UsedLevel4Entries {
entry_state: [false; 512],
rng: CONFIG.aslr.then(entropy::build_rng),
};

used.entry_state[0] = true; // TODO: Can we do this dynamically?
Expand Down Expand Up @@ -99,28 +109,58 @@ impl UsedLevel4Entries {
}
}

/// Returns a unused level 4 entry and marks it as used.
/// Returns an unused level 4 entry and marks it as used. If `CONFIG.aslr` is
/// enabled, this will return a random available entry.
///
/// Since this method marks each returned index as used, it can be used multiple times
/// to determine multiple unused virtual memory regions.
pub fn get_free_entry(&mut self) -> PageTableIndex {
let (idx, entry) = self
// Create an iterator over all available p4 indices.
let mut free_entries = self
.entry_state
.iter_mut()
.iter()
.copied()
.enumerate()
.find(|(_, &mut entry)| entry == false)
.expect("no usable level 4 entries found");
.filter(|(_, used)| !used)
.map(|(idx, _)| idx);

// Choose the free entry index.
let idx_opt = if let Some(rng) = self.rng.as_mut() {
// Randomly choose an index.
free_entries.choose(rng)
} else {
// Choose the first index.
free_entries.next()
};
let idx = idx_opt.expect("no usable level 4 entry found");

// Mark the entry as used.
self.entry_state[idx] = true;

*entry = true;
PageTableIndex::new(idx.try_into().unwrap())
}

/// Returns the virtual start address of an unused level 4 entry and marks it as used.
/// Returns a virtual address in an unused level 4 entry and marks it as used.
///
/// This is a convenience method around [`get_free_entry`], so all of its docs applies here
/// This function calls [`get_free_entry`] internally, so all of its docs apply here
/// too.
pub fn get_free_address(&mut self) -> VirtAddr {
Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
.start_address()
/// Returns a virtual address in an unused level 4 entry and marks it as used.
///
/// When ASLR is enabled, the address is additionally randomized inside the
/// chosen level 4 entry, offset by a multiple of `alignment`.
///
/// This function calls [`get_free_entry`] internally, so all of its docs apply
/// here too.
pub fn get_free_address(&mut self, size: u64, alignment: u64) -> VirtAddr {
    assert!(alignment.is_power_of_two());

    let base =
        Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
            .start_address();

    let offset = if let Some(rng) = self.rng.as_mut() {
        // Choose a random offset.
        const LEVEL_4_SIZE: u64 = 4096 * 512 * 512 * 512;
        // FIX: `LEVEL_4_SIZE - size` underflowed for oversized mappings and
        // `Uniform::from(0..0)` panics when no aligned slot is available.
        let end = LEVEL_4_SIZE
            .checked_sub(size)
            .expect("allocation does not fit in a level 4 entry");
        let slots = end / alignment;
        if slots == 0 {
            0
        } else {
            let uniform_range = Uniform::from(0..slots);
            uniform_range.sample(rng) * alignment
        }
    } else {
        0
    };

    base + offset
}
}
22 changes: 17 additions & 5 deletions src/binary/load_kernel.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,28 @@ where
}

let elf_file = ElfFile::new(bytes)?;
for program_header in elf_file.program_iter() {
program::sanity_check(program_header, &elf_file)?;
}

let virtual_address_offset = match elf_file.header.pt2.type_().as_type() {
header::Type::None => unimplemented!(),
header::Type::Relocatable => unimplemented!(),
header::Type::Executable => 0,
header::Type::SharedObject => used_entries.get_free_address().as_u64(),
header::Type::SharedObject => {
// Find the highest virtual memory address and the biggest alignment.
let load_program_headers = elf_file
.program_iter()
.filter(|h| matches!(h.get_type(), Ok(Type::Load)));
let size = load_program_headers
.clone()
.map(|h| h.virtual_addr() + h.mem_size())
.max()
.unwrap_or(0);
let align = load_program_headers.map(|h| h.align()).max().unwrap_or(1);

used_entries.get_free_address(size, align).as_u64()
}
header::Type::Core => unimplemented!(),
header::Type::ProcessorSpecific(_) => unimplemented!(),
};
Expand All @@ -79,10 +95,6 @@ where
}

fn load_segments(&mut self) -> Result<Option<TlsTemplate>, &'static str> {
for program_header in self.elf_file.program_iter() {
program::sanity_check(program_header, &self.elf_file)?;
}

// Load the segments into virtual memory.
let mut tls_template = None;
for program_header in self.elf_file.program_iter() {
Expand Down
Loading

0 comments on commit a445433

Please sign in to comment.