// seed_riscv/memory.rs

/*
 * Copyright 2022, Isaac Woods
 * SPDX-License-Identifier: MPL-2.0
 */

use arrayvec::ArrayVec;
use core::{fmt, ops::Range, ptr::NonNull};
use fdt::Fdt;
use hal::memory::{Frame, FrameAllocator, FrameSize, PAddr, Size4KiB};
use mulch::{math::align_up, ranges::RangeIntersect};
use spinning_top::Spinlock;
use tracing::trace;

#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
pub enum RegionType {
    #[default]
    Usable,
    Reserved(Usage),
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Usage {
    Firmware,
    DeviceTree,
    Seed,
    Ramdisk,
    Unknown,
}

#[derive(Clone, Copy, Default)]
pub struct Region {
    pub typ: RegionType,
    pub address: PAddr,
    pub size: usize,
}

impl Region {
    pub fn new(typ: RegionType, address: PAddr, size: usize) -> Region {
        assert_eq!(size % Size4KiB::SIZE, 0);
        Region { typ, address, size }
    }

    pub fn usable(address: PAddr, size: usize) -> Region {
        Self::new(RegionType::Usable, address, size)
    }

    pub fn reserved(usage: Usage, address: PAddr, size: usize) -> Region {
        Self::new(RegionType::Reserved(usage), address, size)
    }

    pub fn range(&self) -> Range<PAddr> {
        self.address..(self.address + self.size)
    }
}

impl fmt::Debug for Region {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Region({:?}, {:#x}..{:#x})", self.typ, self.address, self.address + self.size)
    }
}

const MAX_REGIONS: usize = 32;

/// The region map provides a high-level view of the physical memory space, containing large regions of memory that
/// are either usable, or reserved for one of a variety of reasons. This information is static: we don't allocate
/// out of the regions directly - a physical memory allocator is provided by `MemoryManager`.
#[derive(Clone, Debug)]
pub struct MemoryRegions(ArrayVec<Region, MAX_REGIONS>);

impl MemoryRegions {
    pub fn new(fdt: &Fdt, fdt_address: PAddr) -> MemoryRegions {
        use crate::{_seed_end, _seed_start};
        use tracing::info;

        let mut regions = MemoryRegions(ArrayVec::new());

        for region in fdt.memory().regions() {
            info!("Memory region: {:?}", region);
            regions.add_region(Region::usable(
                PAddr::new(region.starting_address as usize).unwrap(),
                region.size.unwrap(),
            ));
        }
        if let Some(reservations) = fdt.find_node("/reserved-memory") {
            for reservation in reservations.children() {
                let reg = reservation.reg().unwrap().next().unwrap();
                info!("Memory reservation with name {}. Reg = {:?}", reservation.name, reg);
                let usage =
                    if reservation.name.starts_with("mmode_resv") { Usage::Firmware } else { Usage::Unknown };
                regions.add_region(Region::reserved(
                    usage,
                    PAddr::new(reg.starting_address as usize).unwrap(),
                    reg.size.unwrap(),
                ));
            }
        } else {
            info!("No memory reservations found");
        }

        // Reserve the memory `seed` has been loaded into
        let seed_start = unsafe { _seed_start.ptr() as usize };
        let seed_end = unsafe { _seed_end.ptr() as usize };
        regions.add_region(Region::reserved(
            Usage::Seed,
            PAddr::new(seed_start).unwrap(),
            align_up(seed_end - seed_start, Size4KiB::SIZE),
        ));

        // Reserve the memory the FDT is in
        regions.add_region(Region::reserved(
            Usage::DeviceTree,
            fdt_address,
            align_up(fdt.total_size(), Size4KiB::SIZE),
        ));

        regions
    }

    /// Add a region of memory to the map, merging it with adjacent regions of the same type, and
    /// splitting/shrinking overlapping regions of other types as needed.
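    ///
    /// A sketch of the behaviour (`ignore`d as it's for illustration, not a runnable doctest;
    /// the addresses are made up):
    ///
    /// ```ignore
    /// let mut regions = MemoryRegions(ArrayVec::new());
    ///
    /// // Three usable frames starting at 0x8000_0000
    /// regions.add_region(Region::usable(PAddr::new(0x8000_0000).unwrap(), 0x3000));
    /// // Adjacent and the same type, so this merges: one region 0x8000_0000..0x8000_4000
    /// regions.add_region(Region::usable(PAddr::new(0x8000_3000).unwrap(), 0x1000));
    /// // A reservation in the middle splits the usable region into 0x8000_0000..0x8000_1000
    /// // and 0x8000_2000..0x8000_4000
    /// regions.add_region(Region::reserved(Usage::Unknown, PAddr::new(0x8000_1000).unwrap(), 0x1000));
    /// ```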
    pub fn add_region(&mut self, region: Region) {
        let mut added = false;

        for existing in &mut self.0 {
            if region.typ == existing.typ {
                /*
                 * The new region is the same type as the existing region - see if the new region is contained
                 * inside the existing one, or if we can merge it onto the front or end.
                 * TODO: this doesn't consider the case of a new region connecting two regions so that all three
                 * can be merged - do we care?
                 */
                if existing.range().encompasses(region.range()) {
                    added = true;
                } else if (region.address + region.size) == existing.address {
                    existing.address = region.address;
                    existing.size += region.size;
                    added = true;
                } else if (existing.address + existing.size) == region.address {
                    existing.size += region.size;
                    added = true;
                }
            }
        }

        /*
         * We now consider regions that aren't the same type as the region we're adding; if this new region covers
         * part of an existing region, we need to 'shrink' that region so that they don't overlap. If the new
         * region is in the middle of another, we have to split the existing region into two.
         */
        self.0 = self
            .0
            .clone()
            .into_iter()
            .flat_map(|existing| {
                let mut new_entries = ArrayVec::<Region, 2>::new();

                if existing.typ == region.typ {
                    new_entries.push(existing);
                    return new_entries;
                }

                let (before, middle, after) = existing.range().split(region.range());
                if middle.is_none() {
                    // The regions don't intersect - add the existing region back
                    new_entries.push(existing);
                } else {
                    /*
                     * The regions do intersect, and so we need to remove the portion that intersects. Add the
                     * portions that don't intersect the new region (potentially one before and one after) back as
                     * two separate regions.
                     */
                    if let Some(before) = before {
                        new_entries.push(Region::new(
                            existing.typ,
                            before.start,
                            usize::from(before.end) - usize::from(before.start),
                        ));
                    }
                    if let Some(after) = after {
                        new_entries.push(Region::new(
                            existing.typ,
                            after.start,
                            usize::from(after.end) - usize::from(after.start),
                        ));
                    }
                }
                new_entries
            })
            .collect();

        if !added {
            self.0.push(region);
        }
    }
}

/// The physical memory manager - this consumes a `MemoryRegions` map, and uses it to initialise an
/// intrusive free list of all usable physical memory. This can then be used to allocate physical
/// memory as needed, at frame granularity.
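///
/// A sketch of the intended flow (`ignore`d since building the region map needs a real FDT;
/// `fdt` and `fdt_address` are assumed to be in scope):
///
/// ```ignore
/// use hal::memory::FrameAllocator;
///
/// static MEMORY_MANAGER: MemoryManager = MemoryManager::new();
///
/// let regions = MemoryRegions::new(&fdt, fdt_address);
/// MEMORY_MANAGER.init(regions);
///
/// // Allocate 4 contiguous frames of physical memory
/// let frames = MEMORY_MANAGER.allocate_n(4);
/// ```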
pub struct MemoryManager(Spinlock<MemoryManagerInner>);

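// SAFETY: the raw `Node` pointers inside are only ever accessed with the `Spinlock` held, so the
// manager can safely be shared between harts.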
unsafe impl Send for MemoryManager {}
unsafe impl Sync for MemoryManager {}

pub struct MemoryManagerInner {
    regions: Option<MemoryRegions>,
    usable_head: Option<NonNull<Node>>,
    usable_tail: Option<NonNull<Node>>,
}

/// Memory nodes are stored intrusively in the memory that they manage.
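/// Each node lives in the first bytes of the usable region it describes, so the free list
/// consumes none of the memory it tracks.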
#[derive(Clone, Copy, Debug)]
pub struct Node {
    size: usize,
    prev: Option<NonNull<Node>>,
    next: Option<NonNull<Node>>,
}

impl MemoryManager {
    pub const fn new() -> MemoryManager {
        MemoryManager(Spinlock::new(MemoryManagerInner { regions: None, usable_head: None, usable_tail: None }))
    }

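    /// Consume a `MemoryRegions` map and build the free list by writing a `Node` into the start
    /// of each usable region, linking them together in order.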
    pub fn init(&self, regions: MemoryRegions) {
        let mut inner = self.0.lock();
        let mut prev: Option<NonNull<Node>> = None;

        for region in regions.0.iter().filter(|region| region.typ == RegionType::Usable) {
            trace!("Initialising free list in usable region: {:?}", region);

            let node = Node { size: region.size, prev, next: None };
            let node_ptr = NonNull::new(usize::from(region.address) as *mut Node).unwrap();
            unsafe {
                node_ptr.as_ptr().write(node);
            }

            if prev.is_none() {
                assert!(inner.usable_head.is_none());
                assert!(inner.usable_tail.is_none());
                inner.usable_head = Some(node_ptr);
                inner.usable_tail = Some(node_ptr);
            } else {
                unsafe {
                    prev.as_mut().unwrap().as_mut().next = Some(node_ptr);
                }
                inner.usable_tail = Some(node_ptr);
            }

            prev = Some(node_ptr);
        }

        inner.regions = Some(regions);
    }

    pub fn walk_usable_memory(&self) {
        trace!("Tracing usable memory");
        let inner = self.0.lock();

        let mut current_node = inner.usable_head;
        while let Some(node) = current_node {
            let inner_node = unsafe { *node.as_ptr() };
            trace!("Found some usable memory at {:#x}, {} bytes of it!", node.as_ptr().addr(), inner_node.size);
            current_node = inner_node.next;
        }
        trace!("Finished tracing usable memory");
    }

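    /// Build the `MemoryMap` handed over to the kernel: reserved regions are translated to their
    /// corresponding `MemoryType`s, then the free list is walked to record the usable memory that
    /// remains. This also tears down the free list, so nothing can be allocated afterwards.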
    pub fn populate_memory_map(&self, memory_map: &mut seed::boot_info::MemoryMap) {
        use seed::boot_info::{MemoryMapEntry, MemoryType};

        trace!("Populating memory map from gathered regions");
        let mut inner = self.0.lock();

        for region in &inner.regions.as_ref().unwrap().0 {
            trace!("Considering region: {:?}", region);

            /*
             * First, we walk the region map.
             */
            match region.typ {
                // For usable regions, some of the memory may already have been allocated, so we
                // ignore them here and instead walk the free list below.
                RegionType::Usable => (),
                RegionType::Reserved(Usage::Firmware) => (),
                RegionType::Reserved(Usage::DeviceTree) => memory_map
                    .push(MemoryMapEntry::new(MemoryType::FdtReclaimable, region.address, region.size))
                    .unwrap(),
                RegionType::Reserved(Usage::Seed) => memory_map
                    .push(MemoryMapEntry::new(MemoryType::Conventional, region.address, region.size))
                    .unwrap(),
                RegionType::Reserved(Usage::Ramdisk) => memory_map
                    .push(MemoryMapEntry::new(MemoryType::Conventional, region.address, region.size))
                    .unwrap(),
                RegionType::Reserved(Usage::Unknown) => (),
            }
        }

        /*
         * We now walk the free list to reconstruct a map of usable memory.
         */
        let mut current_node = inner.usable_head;
        while let Some(node) = current_node {
            let inner_node = unsafe { *node.as_ptr() };
            trace!("Found some usable memory at {:#x}, {} bytes of it!", node.as_ptr().addr(), inner_node.size);
            memory_map
                .push(MemoryMapEntry::new(
                    MemoryType::Conventional,
                    PAddr::new(node.as_ptr().addr()).unwrap(),
                    inner_node.size,
                ))
                .unwrap();
            current_node = inner_node.next;
        }

        // Sort the entries by address. This probably isn't necessary, but makes the map easier to
        // read when debugging.
        memory_map.sort_unstable_by_key(|entry| entry.start);

        /*
         * From this point, we don't want the bootloader to make any more allocations. We prevent this by removing
         * all allocatable memory.
         */
        inner.usable_head = None;
        inner.usable_tail = None;
    }
}

impl FrameAllocator<Size4KiB> for MemoryManager {
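    /// Allocate `n` contiguous frames with a first-fit walk of the free list. Memory is taken
    /// from the *end* of the chosen region so the intrusive `Node` at its start remains valid.
    /// For example, the first `allocate_n(1)` against a 4-frame region at `0x8000_0000` returns
    /// the frame at `0x8000_3000`, shrinking the region to 3 frames.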
    fn allocate_n(&self, n: usize) -> Range<Frame<Size4KiB>> {
        let mut inner = self.0.lock();
        let mut current_node = inner.usable_head;

        while let Some(node) = current_node {
            let inner_node = unsafe { &mut *node.as_ptr() };
            if inner_node.size >= (n * Size4KiB::SIZE) {
                let start_addr = node.as_ptr().addr();

                // Allocate from the end of the region so we don't need to alter the node pointers
                inner_node.size -= n * Size4KiB::SIZE;

                /*
                 * If we've exhausted this region, unlink its node from the free list (the node's
                 * memory is invalidated when we return the last frame, which holds the list
                 * entry). The node isn't necessarily the list head, so fix up both neighbours.
                 */
                if inner_node.size == 0 {
                    match inner_node.prev {
                        Some(mut prev) => unsafe { prev.as_mut().next = inner_node.next },
                        None => inner.usable_head = inner_node.next,
                    }
                    match inner_node.next {
                        Some(mut next) => unsafe { next.as_mut().prev = inner_node.prev },
                        None => inner.usable_tail = inner_node.prev,
                    }
                }

                return Frame::starts_with(PAddr::new(start_addr + inner_node.size).unwrap())
                    ..Frame::starts_with(PAddr::new(start_addr + inner_node.size + n * Size4KiB::SIZE).unwrap());
            }

            current_node = inner_node.next;
        }

        panic!("Can't allocate {} frames :(", n);
    }

    fn free_n(&self, _start: Frame<Size4KiB>, _n: usize) {
        unimplemented!();
    }
}

impl virtio::virtqueue::Mapper for MemoryManager {
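    /// Allocate and zero `size` bytes of physically-contiguous memory for a virtqueue, returning
    /// `(paddr, vaddr)`. The two are identical here, relying on Seed identity-mapping physical
    /// memory.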
    fn alloc(&self, size: usize) -> (usize, usize) {
        // TODO: this wastes memory by rounding every allocation up to whole frames, but it keeps
        // the frame allocator simple for now.
        let frames = self.allocate_n(Size4KiB::frames_needed(size));
        let addr = usize::from(frames.start.start);

        // Zero the memory. `ptr::write_bytes` avoids materialising a `&mut [u8]` over memory that
        // was never a Rust allocation, which was the soundness concern with the slice approach.
        unsafe {
            core::ptr::write_bytes(addr as *mut u8, 0, size);
        }

        (addr, addr)
    }
}