kernel/object/
memory_object.rs

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
use super::{alloc_kernel_object_id, KernelObject, KernelObjectId, KernelObjectType};
use alloc::{sync::Arc, vec::Vec};
use hal::memory::{Flags, PAddr};
use seed::boot_info::Segment;
use spinning_top::Spinlock;

#[derive(Debug)]
/// A kernel object representing a region of physical memory that can be mapped into address
/// spaces. Mutable state lives behind a `Spinlock` in [`Inner`].
pub struct MemoryObject {
    /// Unique kernel object ID, allocated by `alloc_kernel_object_id` at construction.
    pub id: KernelObjectId,
    /// The kernel object (presumably a task/process — confirm against callers) that owns this
    /// memory object.
    pub owner: KernelObjectId,
    /// Lock-protected mutable state (size, flags, and physical backing extents).
    pub inner: Spinlock<Inner>,
}

#[derive(Debug)]
/// Mutable state of a [`MemoryObject`], kept behind a `Spinlock`.
pub struct Inner {
    /// Size of this MemoryObject in bytes.
    pub size: usize,
    /// Memory flags (e.g. permissions) applied when this object is mapped.
    pub flags: Flags,
    /// Physical backing as a list of `(start address, length in bytes)` extents. Starts as a
    /// single extent; `MemoryObject::extend` appends further extents.
    pub backing: Vec<(PAddr, usize)>,
}

impl MemoryObject {
    /// Create a new `MemoryObject` of `size` bytes, backed by a single physically-contiguous
    /// extent starting at `physical_address`.
    pub fn new(owner: KernelObjectId, physical_address: PAddr, size: usize, flags: Flags) -> Arc<MemoryObject> {
        Arc::new(MemoryObject {
            id: alloc_kernel_object_id(),
            owner,
            inner: Spinlock::new(Inner { size, flags, backing: vec![(physical_address, size)] }),
        })
    }

    /// Create a `MemoryObject` describing a `Segment` handed to us by the bootloader.
    ///
    /// Delegates to [`Self::new`] so the two constructors cannot drift apart.
    pub fn from_boot_info(owner: KernelObjectId, segment: &Segment) -> Arc<MemoryObject> {
        Self::new(owner, segment.physical_address, segment.size, segment.flags)
    }

    /// Extend this `MemoryObject` by `extend_by` bytes. The new portion of the object is backed
    /// by physical memory starting at `new_backing`.
    ///
    /// ### Note
    /// Note that this does not map the new portion of the object into address spaces that this
    /// memory object is already mapped into.
    ///
    /// # Safety
    /// The caller must guarantee that the `extend_by` bytes of physical memory starting at
    /// `new_backing` are valid to back this object (i.e. not simultaneously in use elsewhere).
    ///
    /// # Panics
    /// Panics if `extend_by` is zero.
    pub unsafe fn extend(&self, extend_by: usize, new_backing: PAddr) {
        assert!(extend_by > 0);
        let mut inner = self.inner.lock();
        inner.size += extend_by;
        inner.backing.push((new_backing, extend_by));
    }

    /// Current size of the object in bytes. Takes the inner lock.
    pub fn size(&self) -> usize {
        self.inner.lock().size
    }

    /// Flags the object was created with. Takes the inner lock.
    pub fn flags(&self) -> Flags {
        self.inner.lock().flags
    }
}

impl KernelObject for MemoryObject {
    /// Returns the unique kernel object ID allocated at construction.
    fn id(&self) -> KernelObjectId {
        self.id
    }

    /// All instances of this type report the `MemoryObject` kernel object type.
    fn typ(&self) -> KernelObjectType {
        KernelObjectType::MemoryObject
    }
}

impl PartialEq for MemoryObject {
    /// Two `MemoryObject`s are equal exactly when they carry the same kernel object ID —
    /// identity equality, not structural comparison of their contents.
    fn eq(&self, other: &Self) -> bool {
        self.id.eq(&other.id)
    }
}