// seed_riscv/image.rs

/*
 * Copyright 2022, Isaac Woods
 * SPDX-License-Identifier: MPL-2.0
 */

use crate::{fs::File, memory::MemoryManager};
use core::{
    ptr,
    slice,
    str::{self, FromStr},
};
use hal::memory::{Flags, FrameAllocator, FrameSize, PAddr, Page, PageTable, Size4KiB, VAddr};
use hal_riscv::platform::kernel_map;
use mer::{
    program::{ProgramHeader, SegmentType},
    Elf,
};
use mulch::math::align_up;
use seed::boot_info::{LoadedImage, Segment};

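/// Describes the kernel after it has been loaded into memory and mapped into the kernel page tables.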
#[derive(Clone, Debug)]
pub struct LoadedKernel {
    pub entry_point: VAddr,
    pub stack_top: VAddr,
    pub global_pointer: VAddr,

    /// The kernel is loaded to the base of the kernel address space, and then we dynamically map stuff into the
    /// space after it. This is the address of the first available page after the loaded kernel.
    pub next_available_address: VAddr,
}

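/// Load the kernel's ELF image: each `Load` segment is copied into freshly allocated frames and mapped into
/// `page_table` at its requested virtual address. We also locate the `_stack_top` and `__global_pointer$`
/// symbols, and unmap the page at `_guard_page` so that a kernel stack overflow faults instead of silently
/// corrupting memory.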
pub fn load_kernel<P>(file: &File<'_>, page_table: &mut P, memory_manager: &MemoryManager) -> LoadedKernel
where
    P: PageTable<Size4KiB>,
{
    let elf = Elf::new(file.data).expect("Failed to parse kernel ELF");

    let entry_point = VAddr::new(elf.entry_point());
    let mut next_available_address = kernel_map::KERNEL_BASE;

    for segment in elf.segments() {
        match segment.segment_type() {
            SegmentType::Load if segment.mem_size > 0 => {
                let segment = load_segment(segment, &elf, false, memory_manager);

                /*
                 * If this segment loads past `next_available_address`, update it.
                 */
                if (segment.virtual_address + segment.size) > next_available_address {
                    next_available_address =
                        (Page::<Size4KiB>::contains(segment.virtual_address + segment.size) + 1).start;
                }

                assert!(
                    segment.virtual_address.is_aligned(Size4KiB::SIZE),
                    "Segment's virtual address is not page-aligned"
                );
                assert!(
                    segment.physical_address.is_aligned(Size4KiB::SIZE),
                    "Segment's physical address is not frame-aligned"
                );
                assert!(segment.size % Size4KiB::SIZE == 0, "Segment size is not a multiple of page size!");
                page_table
                    .map_area(
                        segment.virtual_address,
                        segment.physical_address,
                        segment.size,
                        segment.flags,
                        memory_manager,
                    )
                    .unwrap();
            }

            _ => (),
        }
    }

    let stack_top = match elf.symbols().find(|symbol| symbol.name(&elf) == Some("_stack_top")) {
        Some(symbol) => VAddr::new(symbol.value as usize),
        None => panic!("Kernel does not have a '_stack_top' symbol!"),
    };

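    // `__global_pointer$` is the linker-defined symbol that RISC-V code expects to be loaded into the `gp`
    // register, enabling linker relaxation of accesses to global data.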
    let global_pointer = match elf.symbols().find(|symbol| symbol.name(&elf) == Some("__global_pointer$")) {
        Some(symbol) => VAddr::new(symbol.value as usize),
        None => panic!("Kernel does not have a '__global_pointer$' symbol!"),
    };

    // Unmap the stack guard page
    let guard_page_address = match elf.symbols().find(|symbol| symbol.name(&elf) == Some("_guard_page")) {
        Some(symbol) => VAddr::new(symbol.value as usize),
        None => panic!("Kernel does not have a '_guard_page' symbol!"),
    };
    assert!(guard_page_address.is_aligned(Size4KiB::SIZE), "Guard page address is not page aligned");
    page_table.unmap::<Size4KiB>(Page::starts_with(guard_page_address));

    LoadedKernel { entry_point, stack_top, global_pointer, next_available_address }
}

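/// Load a user task's ELF image. Each `Load` segment is copied into freshly allocated frames, and the set of
/// loaded segments is recorded in a `LoadedImage` for the boot info.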
pub fn load_image(file: &File<'_>, name: &str, memory_manager: &MemoryManager) -> LoadedImage {
    let elf = Elf::new(file.data).expect("Failed to parse user task ELF");
    let mut image_data = LoadedImage::default();
    image_data.entry_point = VAddr::new(elf.entry_point());
    image_data.name = heapless::String::from_str(name).unwrap();

    for segment in elf.segments() {
        match segment.segment_type() {
            SegmentType::Load if segment.mem_size > 0 => {
                let segment = load_segment(segment, &elf, true, memory_manager);

                match image_data.segments.push(segment) {
                    Ok(()) => (),
                    Err(_) => panic!("Image for '{}' has too many load segments!", name),
                }
            }
            _ => (),
        }
    }

    image_data
}

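/// Copy a single `Load` segment out of an ELF into freshly allocated frames, zeroing any memory the segment
/// occupies beyond the data supplied in the file, and describe the result as a `Segment` ready to be mapped.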
fn load_segment(
    segment: ProgramHeader,
    elf: &Elf,
    user_accessible: bool,
    memory_manager: &MemoryManager,
) -> Segment {
    /*
     * We don't require each segment to fill all of the pages it occupies - as long as the start of each segment
     * is page-aligned, segments can't overlap, and that's fine. This is mainly to support images linked by `lld`
     * with the `-z separate-loadable-segments` flag, which produces such segments, and also so TLS segments
     * don't take up more space than they need (this lets the kernel know their actual size, and align it up to a
     * page itself if it needs to).
     *
     * However, we do need to align the size up to a page boundary here, so that all of the allocated memory is
     * zeroed.
     */
    let mem_size = align_up(segment.mem_size as usize, Size4KiB::SIZE);

    let num_frames = mem_size / Size4KiB::SIZE;
    let physical_address = memory_manager.allocate_n(num_frames).start.start;

    /*
     * Copy `file_size` bytes from the image into the segment's new home. Note that
     * `file_size` may be less than `mem_size`, but must never be greater than it.
     * NOTE: we compare against the segment's original memory size here, before it was aligned up to a page boundary.
     */
    assert!(segment.file_size <= segment.mem_size, "Segment's data will not fit in requested memory");
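    // The loader can address physical memory directly at this point, so we write into the newly allocated
    // frames through their physical address.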
    unsafe {
        slice::from_raw_parts_mut(usize::from(physical_address) as *mut u8, segment.file_size as usize)
            .copy_from_slice(segment.data(&elf));
    }

    /*
     * Zero the remainder of the segment.
     */
    unsafe {
        ptr::write_bytes(
            (usize::from(physical_address) + (segment.file_size as usize)) as *mut u8,
            0,
            mem_size - (segment.file_size as usize),
        );
    }

    Segment {
        physical_address,
        virtual_address: VAddr::new(segment.virtual_address as usize),
        size: num_frames * Size4KiB::SIZE,
        flags: Flags {
            writable: segment.is_writable(),
            executable: segment.is_executable(),
            user_accessible,
            ..Default::default()
        },
    }
}