1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
use super::page_table::entry::Entry;
use super::page_table::{attr, L1Table};
use crate::config::{PAGE_SIZE, RMM_SHARED_BUFFER_START};
use crate::mm::page::BasePageSize;
use crate::mm::page_table::entry::PTDesc;

use vmsa::address::{PhysAddr, VirtAddr};
use vmsa::page::Page;
use vmsa::page_table::PageTable as RootPageTable;
use vmsa::page_table::{Level, PageTableMethods};

use armv9a::bits_in_reg;
use core::ffi::c_void;
use core::fmt;
use lazy_static::lazy_static;
use spin::mutex::Mutex;

// Linker-script symbols delimiting the RMM image layout. Only their
// *addresses* are meaningful (taken via `&sym as *const u64 as u64` in
// `Inner::fill`); the values behind them are never read.
extern "C" {
    static __RMM_BASE__: u64;  // start of the RMM image (RO region begins here)
    static __RW_START__: u64;  // start of the RW data region (end of RO region)
    static __RW_END__: u64;    // end of the RW data region
}

/// Thin handle to the global RMM page table (`RMM_PAGE_TABLE`).
///
/// Cloning-by-construction via `get_ref()` is cheap: it only stores a
/// `'static` reference to the lazily-initialized, lock-protected inner table.
pub struct PageTable {
    // Shared reference to the global table state; all mutation goes
    // through the spin `Mutex`.
    page_table: &'static Mutex<Inner<'static>>,
}

impl PageTable {
    /// Returns a handle to the global RMM page table.
    pub fn get_ref() -> Self {
        let page_table = &RMM_PAGE_TABLE;
        Self { page_table }
    }

    /// Identity-maps the page containing `addr` into the RMM page table.
    /// `secure` selects the secure/non-secure attribute of the mapping.
    /// Returns `false` when `addr` is 0.
    pub fn map(&self, addr: usize, secure: bool) -> bool {
        let mut inner = self.page_table.lock();
        inner.set_pages_for_rmi(addr, secure)
    }

    /// Removes the mapping for the page containing `addr`.
    /// Returns `false` when `addr` is 0.
    pub fn unmap(&self, addr: usize) -> bool {
        let mut inner = self.page_table.lock();
        inner.unset_pages_for_rmi(addr)
    }
}

lazy_static! {
    // Global, lock-protected RMM stage-1 page table. Constructed (root L1
    // tables allocated) on first access; populated later by `fill()`.
    static ref RMM_PAGE_TABLE: Mutex<Inner<'static>> = Mutex::new(Inner::new());
}

/// Populates the global RMM page table (idempotent — `fill` is a no-op once
/// the table is marked dirty) and returns the address of its root table,
/// suitable for installation into the translation base register.
pub fn get_page_table() -> u64 {
    let mut guard = RMM_PAGE_TABLE.lock();
    guard.fill();
    guard.get_base_address() as u64
}

// initial lookup starts at level 1 with 2 page tables concatenated
// Number of root pages backing the concatenated L1 table.
pub const NUM_ROOT_PAGE: usize = 1;
// Alignment (in pages) required for the concatenated L1 root tables.
pub const ALIGN_ROOT_PAGE: usize = 2;

// Owner of the actual translation-table tree plus a "filled" flag.
struct Inner<'a> {
    // We will set the translation granule with 4KB.
    // To reduce the level of page lookup, initial lookup will start from L1.
    root_pgtlb:
        &'a mut RootPageTable<VirtAddr, L1Table, Entry, { <L1Table as Level>::NUM_ENTRIES }>,
    // True once `fill()` has installed the static RMM mappings; used to make
    // `fill()` idempotent. (Despite the name, it means "already populated".)
    dirty: bool,
}

impl<'a> Inner<'a> {
    /// Allocates an empty root (L1) page table.
    ///
    /// `NUM_ROOT_PAGE`/`ALIGN_ROOT_PAGE` reflect the configuration where
    /// lookup starts at level 1 with two concatenated tables (see the
    /// constants above). Panics if the allocation fails.
    pub fn new() -> Self {
        let root_pgtlb = unsafe {
            // SAFETY: `new_with_align` hands back a pointer to a freshly
            // allocated root table that nothing else references; we keep the
            // sole mutable reference for the lifetime of `Inner`.
            &mut *RootPageTable::<VirtAddr, L1Table, Entry, { <L1Table as Level>::NUM_ENTRIES }>::new_with_align(
                NUM_ROOT_PAGE,
                ALIGN_ROOT_PAGE,
            )
            .unwrap()
        };

        Self {
            root_pgtlb,
            dirty: false,
        }
    }

    /// Installs the RMM's own identity mappings: the read-only image region
    /// (`__RMM_BASE__`..`__RW_START__`), the read-write data region
    /// (`__RW_START__`..`__RW_END__`), the UART device page, and the shared
    /// buffer at `RMM_SHARED_BUFFER_START`. Idempotent: returns immediately
    /// once `dirty` is set.
    fn fill(&mut self) {
        if self.dirty {
            return;
        }

        let ro_flags = bits_in_reg(PTDesc::AP, attr::permission::RO);
        let rw_flags = bits_in_reg(PTDesc::AP, attr::permission::RW);
        let rmm_flags = bits_in_reg(PTDesc::INDX, attr::mair_idx::RMM_MEM);
        let device_flags = bits_in_reg(PTDesc::INDX, attr::mair_idx::DEVICE_MEM);

        unsafe {
            // SAFETY: the extern statics are linker symbols; only their
            // addresses are taken, never their values.
            let base_address = &__RMM_BASE__ as *const u64 as u64;
            let rw_start = &__RW_START__ as *const u64 as u64;
            let ro_size = rw_start - base_address;
            let rw_size = &__RW_END__ as *const u64 as u64 - rw_start;
            let uart_phys: u64 = 0x1c0c_0000;
            let shared_start = RMM_SHARED_BUFFER_START;
            // RMM code + rodata: read-only, normal memory.
            self.set_pages(
                VirtAddr::from(base_address),
                PhysAddr::from(base_address),
                ro_size as usize,
                ro_flags | rmm_flags,
            );
            // RMM data + bss: read-write, normal memory.
            self.set_pages(
                VirtAddr::from(rw_start),
                PhysAddr::from(rw_start),
                rw_size as usize,
                rw_flags | rmm_flags,
            );
            // UART
            self.set_pages(
                VirtAddr::from(uart_phys),
                PhysAddr::from(uart_phys),
                1,
                rw_flags | device_flags,
            );
            // Shared buffer between RMM and the normal world.
            self.set_pages(
                VirtAddr::from(shared_start),
                PhysAddr::from(shared_start),
                PAGE_SIZE,
                rw_flags | rmm_flags,
            );
        }
        //TODO Set dirty only if pages are updated, not added
        self.dirty = true;
    }

    /// Address of the root table, for installation into the translation
    /// base register.
    fn get_base_address(&self) -> *const c_void {
        self.root_pgtlb as *const _ as *const c_void
    }

    /// Maps `size` bytes at `va` -> `phys` with the given descriptor `flags`,
    /// page by page. Failures are logged but not propagated.
    fn set_pages(&mut self, va: VirtAddr, phys: PhysAddr, size: usize, flags: u64) {
        let virtaddr = Page::<BasePageSize, VirtAddr>::range_with_size(va, size);
        let phyaddr = Page::<BasePageSize, PhysAddr>::range_with_size(phys, size);

        if self
            .root_pgtlb
            .set_pages(virtaddr, phyaddr, flags, false)
            .is_err()
        {
            warn!("set_pages error");
        }
    }

    /// Removes the mapping of the single page containing `addr`.
    fn unset_page(&mut self, addr: usize) {
        let va = VirtAddr::from(addr);
        let page = Page::<BasePageSize, VirtAddr>::including_address(va);
        self.root_pgtlb.unset_page(page);
    }

    /// Identity-maps a page for an RMI call: read-write, inner-shareable,
    /// execute-never normal memory; the NS bit is set iff `secure` is false.
    /// Returns `false` when `addr` is 0.
    fn set_pages_for_rmi(&mut self, addr: usize, secure: bool) -> bool {
        if addr == 0 {
            warn!("map address is empty");
            return false;
        }

        let rw_flags = bits_in_reg(PTDesc::AP, attr::permission::RW);
        let memattr_flags = bits_in_reg(PTDesc::INDX, attr::mair_idx::RMM_MEM);
        let sh_flags = bits_in_reg(PTDesc::SH, attr::shareable::INNER);
        // NS=1 for non-secure targets, NS=0 for secure ones.
        let secure_flags = bits_in_reg(PTDesc::NS, !secure as u64);
        let xn_flags = bits_in_reg(PTDesc::UXN, 1) | bits_in_reg(PTDesc::PXN, 1);
        let valid_flags = bits_in_reg(PTDesc::VALID, 1);

        let va = VirtAddr::from(addr);
        let phys = PhysAddr::from(addr);

        self.set_pages(
            va,
            phys,
            PAGE_SIZE,
            rw_flags | memattr_flags | secure_flags | sh_flags | xn_flags | valid_flags,
        );

        true
    }

    /// Undoes `set_pages_for_rmi`. Returns `false` when `addr` is 0.
    fn unset_pages_for_rmi(&mut self, addr: usize) -> bool {
        if addr == 0 {
            // Fixed copy-pasted message: this is the unmap path.
            warn!("unmap address is empty");
            return false;
        }

        self.unset_page(addr);
        true
    }
}

impl<'a> fmt::Debug for Inner<'a> {
    /// Formats as `Inner` with no fields (the raw table tree is not
    /// meaningful to print).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Bug fix: `stringify!(Self)` expands to the literal string "Self",
        // so Debug output misreported the type name. Name the type directly.
        f.debug_struct("Inner").finish()
    }
}

impl<'a> Drop for Inner<'a> {
    fn drop(&mut self) {
        info!("drop PageTable");
        // Project-defined teardown on the page-table tree (a `drop` method
        // from PageTableMethods, not `core::mem::drop`) — presumably frees
        // the table pages allocated in `new()`; confirm against vmsa.
        self.root_pgtlb.drop();
    }
}