author    Jon Santmyer <jon@jonsantmyer.com>  2024-03-19 13:03:52 -0400
committer Jon Santmyer <jon@jonsantmyer.com>  2024-03-19 13:03:52 -0400
commit    f004c1ade8d617a82cea2fe249434cccb47a2358
tree      34571e76039cf2ee2fee93c3f1bdb1bc6d2de5f6
parent    dd5d9e1d48396cbc226ff14fe557a55613c91fcb
rename abi to sys. better memory allocation
-rw-r--r--  abi/permission.h                            |   8
-rw-r--r--  abi/syscall.h                               |  48
-rw-r--r--  arch/x86_64/elf.c                           |  10
-rw-r--r--  arch/x86_64/paging.c                        | 234
-rw-r--r--  arch/x86_64/paging.h                        |  20
-rw-r--r--  arch/x86_64/syscall_setup.S                 |   3
-rw-r--r--  arch/x86_64/tasking.c                       |  13
-rw-r--r--  lib/spinlock.h                              |  27
-rw-r--r--  main.c                                      |   7
-rw-r--r--  mem/memory.h                                |  72
-rw-r--r--  mem/phys.c                                  |  34
-rw-r--r--  mem/slab.c                                  |  18
-rw-r--r--  mem/zone.c                                  |  93
-rw-r--r--  mem/zone.h                                  |  33
-rw-r--r--  sys/errno.h                                 |  10
-rw-r--r--  sys/permission.h                            |  11
-rw-r--r--  sys/syscall.h                               |  69
-rw-r--r--  sys/types.h                                 |  32
-rw-r--r--  usr/syscall.c                               |  86
-rw-r--r--  usr/syscall.h                               |   2
-rw-r--r--  usr/tasking.c (renamed from tsk/thread.c)   |   0
-rw-r--r--  usr/tasking.h (renamed from tsk/tasking.h)  |   7
-rw-r--r--  usr/umode.c                                 |  10
23 files changed, 586 insertions, 261 deletions
diff --git a/abi/permission.h b/abi/permission.h
deleted file mode 100644
index 10bc6a3..0000000
--- a/abi/permission.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef JOVE_ABI_PERMISSION_H
-#define JOVE_ABI_PERMISSION_H 1
-
-#define PERM_MEM_PD 1 /* Permission to write to any PD. */
-#define PERM_MEM_MAP 2 /* Permission to map physical pages. */
-#define PERM_MEM_ALLOC 4 /* Permission to allocate and free physical memory.*/
-
-#endif
diff --git a/abi/syscall.h b/abi/syscall.h
deleted file mode 100644
index e336fe6..0000000
--- a/abi/syscall.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef JOVE_ABI_SYSCALL_H
-#define JOVE_ABI_SYSCALL_H 1
-
-#include <stdint.h>
-
-typedef struct syscall {
- int id;
-} syscall_t;
-
-struct syscall_log {
- syscall_t syscall;
- const char *message;
-};
-
-struct syscall_mem_takefree {
- syscall_t syscall;
- uintptr_t npages;
-};
-
-enum
-{
- SYSCALL_LOG = 0,
- SYSCALL_TID,
-
- SYSCALL_MEM_TAKEFREE,
-
- SYSCALL_COUNT
-};
-
-#define _SYSCALL(data) \
- intmax_t ax; \
- __asm__ volatile("movq %0, %%rdi\nsyscall": "=a"(ax): "r"(data): "memory"); \
- return ax
-
-int _syscall_log(const char *message) {
- struct syscall_log syscall_data = {
- .syscall = (syscall_t){ .id = SYSCALL_LOG },
- .message = message
- };
- _SYSCALL(&syscall_data);
-}
-
-intmax_t _syscall_tid(void) {
- syscall_t syscall_data = { SYSCALL_TID };
- _SYSCALL(&syscall_data);
-}
-
-#endif
diff --git a/arch/x86_64/elf.c b/arch/x86_64/elf.c
index 60b8f13..969cbf0 100644
--- a/arch/x86_64/elf.c
+++ b/arch/x86_64/elf.c
@@ -37,7 +37,15 @@ elf_load(const void *data, size_t len)
struct ELF_phdr *phdr = &phdrs[phdri];
void *pdata = (void*)phdr->p_vaddr;
- mem_ensure_range(phdr->p_vaddr, phdr->p_vaddr + phdr->p_memsz, true, true);
+ mem_ensure_range(
+ phdr->p_vaddr,
+ phdr->p_vaddr + phdr->p_memsz,
+ (page_flags_t) {
+ .present = true,
+ .writeable = true,
+ .useraccess = true,
+ .executable = true
+ });
if(phdr->p_type == PT_LOAD)
{
memcpy(pdata, (void*)((uintptr_t)data + phdr->p_offset), phdr->p_filesz);
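
Note that every segment is mapped present/writeable/user/executable here.
A tighter variant would derive the flags per segment from the program
header; a sketch, assuming struct ELF_phdr exposes the standard p_flags
field and the usual PF_W/PF_X bits are available:

    /* Hypothetical per-segment flags (PF_W/PF_X are the standard ELF bits). */
    mem_ensure_range(
        phdr->p_vaddr,
        phdr->p_vaddr + phdr->p_memsz,
        (page_flags_t) {
            .present = true,
            .writeable = (phdr->p_flags & PF_W) != 0,
            .useraccess = true,
            .executable = (phdr->p_flags & PF_X) != 0
        });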
diff --git a/arch/x86_64/paging.c b/arch/x86_64/paging.c
index 9e8a5ed..dc27ca2 100644
--- a/arch/x86_64/paging.c
+++ b/arch/x86_64/paging.c
@@ -1,9 +1,10 @@
#include "paging.h"
#include "interrupt.h"
-#include <stddef.h>
-#include "lib/jove.h"
#include "io/log.h"
+#include "lib/jove.h"
#include "lib/string.h"
+#include "lib/hashtable.h"
+#include "mem/memory.h"
#include "boot/boot.h"
extern void *_kernel_end;
@@ -12,9 +13,16 @@ PAGEALIGN static uint64_t s_kernel_initial_pml4[512];
PAGEALIGN static uint64_t s_kernel_initial_pml3[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml2[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml1[2][512];
-static struct PageDirectory s_kernel_initial_pd;
+static intmax_t s_next_pdid = 0;
+static page_directory_t s_kernel_initial_pd;
+
+page_directory_t *current_page_directory;
-struct PageDirectory *mem_current_pd;
+struct PageStateCache {
+ page_directory_t *pd;
+ size_t pmli[4];
+ pmle_t *pml[4];
+} s_state_cache;
physptr_t
mem_linear_tophys_koffset(uintptr_t virt)
@@ -29,17 +37,20 @@ mem_phys_tolinear(physptr_t phys)
}
static size_t
-s_paging_pmle(size_t l, uintptr_t addr)
+s_paging_pmli(size_t l, uintptr_t addr)
{
size_t shift = (12 + (9 * l));
return (addr & (0x1FFULL << shift)) >> shift;
}
-static union PageEntry*
-s_paging_fetch_table(union PageEntry *pt, size_t l, uintptr_t virt)
+static pmle_t*
+s_paging_fetch_table(pmle_t *pml, size_t l, uintptr_t virt)
{
- size_t pmle = s_paging_pmle(l, virt);
- union PageEntry entry = pt[pmle];
+ size_t pmli = s_paging_pmli(l, virt);
+ if(s_state_cache.pmli[l] == pmli && s_state_cache.pml[l] != NULL)
+ return s_state_cache.pml[l];
+
+ pmle_t entry = pml[pmli];
bool entry_new = false;
if(!entry.p) {
entry_new = true;
@@ -47,122 +58,134 @@ s_paging_fetch_table(union PageEntry *pt, size_t l, uintptr_t virt)
entry.p = 1;
entry.rw = 1;
entry.us = 1;
- pt[pmle] = entry;
+ pml[pmli] = entry;
}
- union PageEntry *table = (union PageEntry*)(mem_phys_tolinear(entry.paddr << 12));
- if(entry_new) memset(table, 0, PAGESIZE);
+ pmle_t *table = (pmle_t*)(mem_phys_tolinear(entry.paddr << 12));
+ if(entry_new) memset(table, 0, PAGE_SIZE);
+
+ s_state_cache.pmli[l] = pmli;
+ s_state_cache.pml[l] = table;
return table;
}
-static union PageEntry*
-s_paging_get_table(union PageEntry *pt, size_t l, uintptr_t virt)
+static void
+s_paging_cache_tables(page_directory_t *pd, uintptr_t virt)
{
- if(pt == NULL) return NULL;
- size_t pmle = s_paging_pmle(l, virt);
- union PageEntry entry = pt[pmle];
- if(!entry.p) return NULL;
- return (union PageEntry*)(mem_phys_tolinear(entry.paddr << 12));
+ pmle_t *pml4 = (pmle_t*)pd->virt;
+ if(s_state_cache.pd != pd) memset(&s_state_cache, 0, sizeof(s_state_cache));
+
+ pmle_t *pml3 = s_paging_fetch_table(pml4, 3, virt);
+ pmle_t *pml2 = s_paging_fetch_table(pml3, 2, virt);
+ pmle_t *pml1 = s_paging_fetch_table(pml2, 1, virt);
}
-physptr_t
-mem_linear_tophys(uintptr_t virt)
+static pmle_t*
+s_paging_get_table(pmle_t *pt, size_t l, uintptr_t virt)
{
- struct PageDirectory *pd = mem_current_pd;
- union PageEntry *pml3 = s_paging_get_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml2 = s_paging_get_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml1 = s_paging_get_table(pd->pml4_vaddr, 3, virt);
- if(pml1 == NULL) return 0;
-
- size_t pml1i = s_paging_pmle(0, virt);
+ if(pt == NULL) return NULL;
+ size_t pmli = s_paging_pmli(l, virt);
+ if(s_state_cache.pmli[l] == pmli && s_state_cache.pml[l] != NULL)
+ return s_state_cache.pml[l];
- if(!pml1[pml1i].p) return 0;
- return pml1[pml1i].paddr << 12;
+ pmle_t entry = pt[pmli];
+ if(!entry.p) return NULL;
+ return (pmle_t*)(mem_phys_tolinear(entry.paddr << 12));
}
-bool
-mem_check_ptr(const void *ptr)
+page_mapping_t
+mem_get_mapping_as(page_directory_t *pd, uintptr_t addr)
{
- if(ptr == NULL) return false;
- return mem_linear_tophys((uintptr_t)ptr) != 0;
-}
+ spinlock_acquire(pd->lock);
+ page_mapping_t mapping = { 0 };
+
+ pmle_t *pml4 = (pmle_t*)pd->virt;
+ if(s_state_cache.pd != pd) memset(&s_state_cache, 0, sizeof(s_state_cache));
-void
-mem_paging_map4k(struct PageDirectory *pd, physptr_t phys, uintptr_t virt, uint8_t flg)
-{
- union PageEntry *pml3 = s_paging_fetch_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml2 = s_paging_fetch_table(pml3, 2, virt);
- union PageEntry *pml1 = s_paging_fetch_table(pml2, 1, virt);
- size_t pml1e = s_paging_pmle(0, virt);
-
- pml1[pml1e] = (union PageEntry) {
- .p = (flg & 1) > 0,
- .rw = (flg & 2) > 0,
- .us = (flg & 4) > 0,
- .paddr = phys >> 12
+ pmle_t *pml3 = s_paging_get_table(pml4, 3, addr);
+ pmle_t *pml2 = s_paging_get_table(pml3, 2, addr);
+ pmle_t *pml1 = s_paging_get_table(pml2, 1, addr);
+ if(pml1 == NULL) goto release_return;
+
+ size_t pml1i = s_paging_pmli(0, addr);
+ pmle_t pml1e = pml1[pml1i];
+
+ mapping = (page_mapping_t) {
+ .phys = (pml1e.paddr << 12) & ~PAGE_MASK,
+ .pf = {
+ .present = pml1e.p,
+ .writeable = pml1e.rw,
+ .useraccess = pml1e.us,
+ .executable = !pml1e.xd
+ }
};
+release_return:
+ spinlock_release(pd->lock);
+ return mapping;
}
-union PageEntry
-mem_paging_fetch4k(struct PageDirectory *pd, uintptr_t virt)
-{
- union PageEntry *pml3 = s_paging_fetch_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml2 = s_paging_fetch_table(pml3, 2, virt);
- union PageEntry *pml1 = s_paging_fetch_table(pml2, 1, virt);
- return pml1[s_paging_pmle(0, virt)];
-}
+page_mapping_t mem_get_mapping(uintptr_t addr)
+{ return mem_get_mapping_as(current_page_directory, addr); }
-void
-mem_pd_ensure_4k(struct PageDirectory *pd, uintptr_t virt, uint8_t flg)
-{
- union PageEntry pml1e = mem_paging_fetch4k(pd, virt);
- if(!pml1e.p) {
- uintptr_t phys = mem_phys_alloc(1);
- mem_paging_map4k(pd, phys, virt, flg);
- }
-}
-void
-mem_pd_ensure_range(struct PageDirectory *pd, uintptr_t from, uintptr_t to, uint8_t flg)
+bool
+mem_check_ptr(const void *ptr)
{
- from &= ~0xFFF;
- for(; from < to; from += PAGESIZE)
- mem_pd_ensure_4k(pd, from, flg);
+ return mem_get_mapping((uintptr_t)ptr).pf.present != 0;
}
void
-mem_ensure_range_for(void *pd, uintptr_t from, uintptr_t to, bool rw, bool user)
+mem_set_mapping_as(page_directory_t *pd, page_mapping_t mapping, uintptr_t virt)
{
- mem_pd_ensure_range((struct PageDirectory*)pd, from, to, 1 | (rw << 1) | (user << 2));
+ spinlock_acquire(pd->lock);
+ s_paging_cache_tables(pd, virt);
+ pmle_t *pml1 = s_state_cache.pml[1]; /* s_paging_fetch_table caches the level-1 table at index 1 */
+ size_t pml1i = s_paging_pmli(0, virt);
+
+ pml1[pml1i] = (pmle_t) {
+ .p = mapping.pf.present,
+ .rw = mapping.pf.writeable,
+ .us = mapping.pf.useraccess,
+ .xd = !mapping.pf.executable,
+ .paddr = mapping.phys >> 12
+ };
+ spinlock_release(pd->lock);
}
+void mem_set_mapping(page_mapping_t mapping, uintptr_t virt)
+{ mem_set_mapping_as(current_page_directory, mapping, virt); }
+
void
-mem_ensure_range(uintptr_t from, uintptr_t to, bool rw, bool user)
+mem_ensure_range_as(page_directory_t *pd, uintptr_t from, uintptr_t to, page_flags_t flg)
{
- mem_ensure_range_for(mem_current_pd, from, to, rw, user);
-}
+ spinlock_acquire(pd->lock);
+ from &= ~(PAGE_SIZE - 1);
-void mem_pd_new(struct PageDirectory *pd)
-{
- physptr_t pml4p = mem_phys_alloc(1);
- union PageEntry *pml4 = (union PageEntry*)mem_phys_tolinear(pml4p);
- memset(pml4, 0, PAGESIZE);
- memcpy(&pml4[256], &pd->pml4_vaddr[256], PAGESIZE / 2);
-
- *pd = (struct PageDirectory){
- .pml4_vaddr = pml4,
- .pml4_paddr = pml4p,
- .references = 1
- };
-}
+ if(to < from) to = from;
+ size_t pages = (to - from) >> 12;
+ if(pages == 0) pages = 1;
+ for(size_t i = 0; i < pages; i++) {
+ uintptr_t waddr = from + (i << 12);
+ s_paging_cache_tables(pd, waddr);
+ pmle_t *pml1 = s_state_cache.pml[1];
+ size_t pml1i = s_paging_pmli(0, waddr);
-void mem_pd_clone(struct PageDirectory *pd, struct PageDirectory *parent)
-{
- mem_pd_new(pd);
- for(size_t i = 0; i < 256; i++) {
- //TODO: Impl pd cloning
+ if(!pml1[pml1i].p) {
+ physptr_t phys = mem_phys_alloc(1);
+ pml1[pml1i] = (pmle_t) {
+ .p = flg.present,
+ .rw = flg.writeable,
+ .us = flg.useraccess,
+ .xd = !flg.executable,
+ .paddr = phys >> 12
+ };
+ }
}
+ spinlock_release(pd->lock);
}
+void mem_ensure_range(uintptr_t from, uintptr_t to, page_flags_t flg)
+{ mem_ensure_range_as(current_page_directory, from, to, flg); }
+
struct Registers*
s_pagefault_handler(struct Registers *state)
{
@@ -189,16 +212,17 @@ s_pagefault_handler(struct Registers *state)
void
mem_paging_setup(void)
{
- memset(s_kernel_initial_pml4, 0, PAGESIZE);
- memset(s_kernel_initial_pml3, 0, 2 * PAGESIZE);
- memset(s_kernel_initial_pml2, 0, 2 * PAGESIZE);
- memset(s_kernel_initial_pml1, 0, PAGESIZE);
- s_kernel_initial_pd = (struct PageDirectory){
- .pml4_vaddr = (union PageEntry*)&s_kernel_initial_pml4,
- .pml4_paddr = mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml4),
- .references = 1
+ memset(s_kernel_initial_pml4, 0, PAGE_SIZE);
+ memset(s_kernel_initial_pml3, 0, 2 * PAGE_SIZE);
+ memset(s_kernel_initial_pml2, 0, 2 * PAGE_SIZE);
+ memset(s_kernel_initial_pml1, 0, PAGE_SIZE);
+ s_kernel_initial_pd = (page_directory_t){
+ .phys = mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml4),
+ .virt = (union PageEntry*)&s_kernel_initial_pml4,
+ .ref = 1,
+ .id = s_next_pdid++
};
- mem_current_pd = &s_kernel_initial_pd;
+ current_page_directory = &s_kernel_initial_pd;
/* Map first few GiBs */
s_kernel_initial_pml4[256] =
@@ -208,7 +232,7 @@ mem_paging_setup(void)
mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml2[0])
| 3;
for(int i = 0; i < 512; i++) {
- s_kernel_initial_pml2[0][i] = (i * (PAGESIZE * 512)) | 0x80 | 3;
+ s_kernel_initial_pml2[0][i] = (i * (PAGE_SIZE * 512)) | 0x80 | 3;
}
size_t kernel_pml4e = (boot_kernel_virtual_base >> (39));
@@ -225,9 +249,9 @@ mem_paging_setup(void)
s_kernel_initial_pml2[1][kernel_pml2e] =
mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml1[0]) | 3;
for(int i = 0; i < kernel_npages; i++) {
- s_kernel_initial_pml1[0][i] = (i * PAGESIZE) + boot_kernel_physical_address | 3;
+ s_kernel_initial_pml1[0][i] = (i * PAGE_SIZE) + boot_kernel_physical_address | 3;
}
int_set_handler(14, s_pagefault_handler);
- __asm__ volatile("mov %0, %%cr3":: "r"(s_kernel_initial_pd.pml4_paddr));
+ __asm__ volatile("mov %0, %%cr3":: "r"(s_kernel_initial_pd.phys));
}
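
The walk above pulls one 9-bit index out of the virtual address per level,
starting at bit 12. A standalone sketch of the same index math as
s_paging_pmli (runnable outside the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* Level l covers address bits [12 + 9l, 12 + 9l + 8]. */
    static size_t pmli(size_t l, uintptr_t addr)
    {
        size_t shift = 12 + (9 * l);
        return (addr & (0x1FFULL << shift)) >> shift;
    }

    int main(void)
    {
        uintptr_t virt = 0xFFFF800000201000ULL;
        /* l = 3 is the PML4 index; l = 0 indexes the page table itself. */
        for (int l = 3; l >= 0; l--)
            printf("level %d index: %zu\n", l, pmli((size_t)l, virt));
        return 0;
    }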
diff --git a/arch/x86_64/paging.h b/arch/x86_64/paging.h
index 1e88a0b..28dfad2 100644
--- a/arch/x86_64/paging.h
+++ b/arch/x86_64/paging.h
@@ -2,9 +2,8 @@
#define JOVE_ARCH_x86_64_PAGING_H 1
#include <stdint.h>
-#include "mem/memory.h"
-union PageEntry
+typedef union PageMappingLevelEntry
{
struct {
uint8_t p : 1; /* Present */
@@ -24,21 +23,6 @@ union PageEntry
uint8_t xd : 1;
}__attribute__((packed));
uint64_t value;
-}__attribute__((packed));
-
-struct PageDirectory
-{
- union PageEntry *pml4_vaddr;
- physptr_t pml4_paddr;
- size_t references;
-};
-
-extern struct PageDirectory *mem_current_pd;
-
-void mem_pd_new(struct PageDirectory *pd);
-void mem_pd_clone(struct PageDirectory *pd, struct PageDirectory *parent);
-
-void mem_pd_ensure_4k(struct PageDirectory *pd, uintptr_t virt, uint8_t flg);
-void mem_pd_ensure_range(struct PageDirectory *pd, uintptr_t from, uintptr_t to, uint8_t flg);
+} __attribute__((packed)) pmle_t;
#endif
diff --git a/arch/x86_64/syscall_setup.S b/arch/x86_64/syscall_setup.S
index 972e345..4f5c6f0 100644
--- a/arch/x86_64/syscall_setup.S
+++ b/arch/x86_64/syscall_setup.S
@@ -6,12 +6,11 @@
syscall_entry:
swapgs
movq %rsp, %rax
- movq (_kernel_task_bp), %rsp
+ movq _kernel_task_bp, %rsp
pushq %rax
pushq %rbp
pushq %rcx
pushq %r11
- movq %rsp, %rbp
call syscall_handler
popq %r11
popq %rcx
diff --git a/arch/x86_64/tasking.c b/arch/x86_64/tasking.c
index fe6ecdb..9b29330 100644
--- a/arch/x86_64/tasking.c
+++ b/arch/x86_64/tasking.c
@@ -1,4 +1,4 @@
-#include "tsk/tasking.h"
+#include "usr/tasking.h"
#include "mem/memory.h"
#include "io/log.h"
#include "lib/hashtable.h"
@@ -9,7 +9,6 @@
struct TaskBody {
struct Task base;
- struct PageDirectory *pd;
struct Registers state;
};
@@ -44,7 +43,7 @@ tasking_setup(void)
.base.id = s_task_id_next++,
.base.kbp = ((uintptr_t)mem_slab_alloc(&s_kbp_cache)) + 0xFF0,
.base.perm = (size_t)-1,
- .pd = mem_current_pd
+ .base.pd = current_page_directory
};
hashtable_insert(&s_tasks, 0, ktask);
@@ -80,14 +79,8 @@ void
task_free(struct Task *task)
{
struct TaskBody *body = (struct TaskBody*)task;
- body->pd->references--;
+ task->pd->ref--;
task->kbp -= 0xFFF;
mem_slab_free(&s_kbp_cache, (void*)(task->kbp));
klogf("Need impl for task_free\n");
}
-
-void*
-task_get_pd(struct Task *task)
-{
- return ((struct TaskBody*)task)->pd;
-}
diff --git a/lib/spinlock.h b/lib/spinlock.h
new file mode 100644
index 0000000..75af28a
--- /dev/null
+++ b/lib/spinlock.h
@@ -0,0 +1,27 @@
+#ifndef JOVE_LIB_SPINLOCK_H
+#define JOVE_LIB_SPINLOCK_H 1
+
+#include <stdatomic.h>
+
+typedef struct Spinlock
+{
+ atomic_flag flg;
+
+ const char *locker_file;
+ const char *locker_func;
+ int locker_line;
+} spinlock_t;
+
+#define spinlock_acquire(lock) do { \
+ while(atomic_flag_test_and_set_explicit(&(lock).flg, memory_order_acquire)) \
+ __builtin_ia32_pause(); \
+ (lock).locker_file = __FILE__; \
+ (lock).locker_func = __FUNCTION__; \
+ (lock).locker_line = __LINE__; } while(0)
+
+#define spinlock_release(lock) do { \
+ atomic_flag_clear_explicit(&(lock).flg, memory_order_release); \
+ (lock).locker_file = (lock).locker_func = NULL; \
+ (lock).locker_line = -1; } while(0)
+
+#endif
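
The lock is a C11 atomic_flag spun with a pause hint, recording the
holder's file/function/line for debugging. Since both macros expand to
several statements they are wrapped in do { ... } while(0) above. A
minimal usage sketch, assuming a file-scope lock guarding a counter
(a zero-initialised atomic_flag starts clear):

    #include "lib/spinlock.h"

    static spinlock_t s_lock;
    static int s_counter;

    void counter_inc(void)
    {
        spinlock_acquire(s_lock);   /* spins until the flag is released */
        s_counter++;
        spinlock_release(s_lock);   /* clears the flag and debug fields */
    }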
diff --git a/main.c b/main.c
index 9341489..91125af 100644
--- a/main.c
+++ b/main.c
@@ -3,8 +3,8 @@
#include "mem/memory.h"
#include "mem/zone.h"
#include "boot/cmdline.h"
-#include "tsk/tasking.h"
#include "ird/initrd.h"
+#include "usr/tasking.h"
#include "usr/umode.h"
#include "lib/jove.h"
@@ -12,15 +12,14 @@ void
kernel_main(void)
{
serial_setup();
- //arch_tables_setup();
+ arch_tables_setup();
mem_setup();
cmdline_kernel_setup();
+ initrd_setup();
tasking_setup();
-
- initrd_setup();
umode_setup();
kpanic("Reached end of kernel main\n");
diff --git a/mem/memory.h b/mem/memory.h
index 41323ed..251c6f5 100644
--- a/mem/memory.h
+++ b/mem/memory.h
@@ -1,36 +1,61 @@
#ifndef JOVE_MEM_H
#define JOVE_MEM_H 1
-#define PAGESIZE 4096ULL
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (PAGE_SIZE - 1)
+
#define KiB 1024ULL
#define MiB (KiB * KiB)
#define GiB (MiB * KiB)
#define TiB (GiB * KiB)
-#include <stddef.h>
-#include <stdint.h>
-#include <stdbool.h>
-typedef uintptr_t physptr_t;
+#include "lib/spinlock.h"
+#include "sys/types.h"
+
+typedef struct page_directory
+{
+ spinlock_t lock;
+ size_t id;
+ size_t ref;
+ physptr_t phys;
+ void *virt;
+} page_directory_t;
#include "slab.h"
-/*Linear*/
-void mem_paging_setup(void);
+extern page_directory_t *current_page_directory;
-physptr_t mem_linear_tophys(uintptr_t virt);
+/**Setup the kernel structures responsible for handling physical memory translation.*/
+void mem_paging_setup(void);
-/**Check if pointer is within valid memory.
+/**Check if a given pointer is valid.
* @param ptr pointer to check.
- * @return if the pointer is invalid.*/
+ * @return true if the pointer is valid.*/
bool mem_check_ptr(const void *ptr);
-/**Make sure the range indicated is available in memory.
- * If necessary, allocate new pages using the passed flags
- * @param from start of the range.
- * @param to end of the range.
- * @param rw flag to mark page is writeable.
- * @param user flag to mark page as user accessable*/
-void mem_ensure_range(uintptr_t from, uintptr_t to, bool rw, bool user);
+/** Return the physical memory mapping for the given address.
+ * @param pd page directory to get mapping in.
+ * @param addr address to get mapping for.
+ * @return HAL compliant page mapping.*/
+page_mapping_t mem_get_mapping_as(page_directory_t *pd, uintptr_t addr);
+
+/** Return the physical memory mapping for the given address.
+ * @param addr address to get mapping for.
+ * @return HAL compliant page mapping.*/
+page_mapping_t mem_get_mapping(uintptr_t addr);
+
+/** Map a page mapping to a given virtual address.
+ * @param pd pointer to the page directory to edit.
+ * @param mapping mapping to apply.
+ * @param virt virtual address to map to. */
+void mem_set_mapping_as(page_directory_t *pd, page_mapping_t mapping, uintptr_t virt);
+
+/** Map a page mapping to a given virtual address.
+ * @param pd pointer to the page directory to edit.
+ * @param mapping mapping to apply.
+ * @param virt virtual address to map to. */
+void mem_set_mapping(page_mapping_t mapping, uintptr_t virt);
/** Make sure the range indicated is available in memory for specified pd
* If necessary, allocate new pages using the passed flags
@@ -39,7 +64,15 @@ void mem_ensure_range(uintptr_t from, uintptr_t to, bool rw, bool user);
* @param to end of the range
* @param rw flag to mark page is writeable
* @param user flag to mark page as user accessable*/
-void mem_ensure_range_for(void *pd, uintptr_t from, uintptr_t to, bool rw, bool user);
+void mem_ensure_range_as(page_directory_t *pd, uintptr_t from, uintptr_t to, page_flags_t flg);
+
+/**Make sure the range indicated is available in memory
+ * If necessary, allocate new pages using the passed flags
+ * @param from start of the range.
+ * @param to end of the range.
+ * @param flg page flags to apply to any pages newly
+ * allocated within the range.*/
+void mem_ensure_range(uintptr_t from, uintptr_t to, page_flags_t flg);
void mem_slab_setup(void);
void mem_slabcache_new(struct SlabCache *cache, char *name, size_t objsize);
@@ -53,7 +86,8 @@ void mem_free(void *ptr);
/*Physical*/
physptr_t mem_phys_alloc(size_t pages);
-void mem_phys_reserve(physptr_t start, size_t len);
+void mem_phys_reserve(physptr_t start, physptr_t end);
+void mem_phys_release(physptr_t start, physptr_t end);
void mem_setup(void);
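
The bool pairs are gone from this interface: callers describe mappings
with the HAL types from sys/types.h. A kernel-side sketch of the new
calls, assuming the declarations above (map_one_page is a hypothetical
helper):

    #include "mem/memory.h"

    void map_one_page(physptr_t phys, uintptr_t virt)
    {
        page_mapping_t m = {
            .phys = phys & ~PAGE_MASK,
            .pf = { .present = true, .writeable = true,
                    .useraccess = false, .executable = false }
        };
        mem_set_mapping(m, virt);

        /* Read the mapping back through the same interface. */
        page_mapping_t check = mem_get_mapping(virt);
        if (!check.pf.present)
            kpanic("mapping %#016X failed\n", virt);
    }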
diff --git a/mem/phys.c b/mem/phys.c
index 00f3531..bf56b77 100644
--- a/mem/phys.c
+++ b/mem/phys.c
@@ -1,9 +1,39 @@
#include "memory.h"
#include "zone.h"
+#include "lib/jove.h"
+
+void
+mem_phys_reserve(physptr_t start, physptr_t end)
+{
+ size_t zone = mem_zone_for(start);
+ size_t limit = mem_zone_bound_upper(zone);
+
+ if(end > limit) {
+ mem_phys_reserve(limit, end);
+ end = limit;
+ }
+ mem_zone_resv(zone, start, end);
+}
+
+void
+mem_phys_release(physptr_t start, physptr_t end)
+{
+ size_t zone = mem_zone_for(start);
+ size_t limit = mem_zone_bound_upper(zone);
+
+ if(end > limit) {
+ mem_phys_release(limit, end);
+ end = limit;
+ }
+ mem_zone_free(zone, start, end);
+}
physptr_t
mem_phys_alloc(size_t pages)
{
- physptr_t ptr = mem_zone_alloc(MEM_ZONE_STANDARD, pages);
- return ptr;
+ if(mem_zone_pages_free(MEM_ZONE_HIGHER) >= pages)
+ return mem_zone_alloc(MEM_ZONE_HIGHER, pages);
+ if(mem_zone_pages_free(MEM_ZONE_STANDARD) >= pages)
+ return mem_zone_alloc(MEM_ZONE_STANDARD, pages);
+ kpanic("Kernel ran out of physical memory!\n");
}
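
Both routines split a range that crosses a zone boundary: the tail past
the zone's upper bound is handled by a recursive call, then the remainder
is applied to the zone containing start. A worked example, assuming the
standard zone ends at 4 GiB (illustrative bound):

    mem_phys_reserve(0xFFFF0000, 0x100010000);
    /* Recurses as mem_phys_reserve(0x100000000, 0x100010000), which lands
     * in the higher zone, then reserves [0xFFFF0000, 0x100000000) in the
     * standard zone. */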
diff --git a/mem/slab.c b/mem/slab.c
index 75b8302..30bc23a 100644
--- a/mem/slab.c
+++ b/mem/slab.c
@@ -17,7 +17,15 @@ s_next_free(size_t width)
{
uintptr_t ret = s_addr_next_free;
s_addr_next_free += width;
- mem_ensure_range(ret, s_addr_next_free, true, false);
+ mem_ensure_range(
+ ret,
+ s_addr_next_free,
+ (page_flags_t) {
+ .present = true,
+ .writeable = true,
+ .useraccess = false,
+ .executable = false
+ });
return ret;
}
@@ -36,7 +44,7 @@ s_get_free_listw(size_t slabw, size_t objw)
static struct SlabDescriptor
*s_slab_new(struct SlabCache *cache, struct SlabDescriptor *last)
{
- size_t slab_width = (cache->slab_pages * PAGESIZE);
+ size_t slab_width = (cache->slab_pages << PAGE_SHIFT);
uintptr_t descr_base = s_next_free(slab_width);
struct SlabDescriptor *descr = (struct SlabDescriptor*)descr_base;
@@ -122,7 +130,7 @@ mem_slab_free(struct SlabCache *cache, void *ptr)
{
uintptr_t base = (uintptr_t)slab->obj_base;
uintptr_t limit = ((uintptr_t)slab->slab_base)
- + (cache->slab_pages * PAGESIZE);
+ + (cache->slab_pages << PAGE_SHIFT);
if(addr > limit || addr < base) continue;
if((addr - base) % cache->obj_size != 0) {
klogf("Tried to free offset pointer %#016X in slab %s\n",
@@ -142,7 +150,7 @@ mem_slab_free(struct SlabCache *cache, void *ptr)
{
uintptr_t base = (uintptr_t)slab->obj_base;
uintptr_t limit = ((uintptr_t)slab->slab_base)
- + (cache->slab_pages * PAGESIZE);
+ + (cache->slab_pages << PAGE_SHIFT);
if(addr > limit || addr < base) continue;
if((addr - base) % cache->obj_size != 0) {
klogf("Tried to free offset pointer %#016X in slab %s\n",
@@ -189,7 +197,7 @@ mem_slab_setup(void)
{
s_addr_next_free = (uintptr_t)&_kernel_end;
s_addr_next_free = ((s_addr_next_free >> 12) + 1) << 12;
- s_get_free_listw(PAGESIZE - sizeof(struct SlabDescriptor), 32);
+ s_get_free_listw(PAGE_SIZE - sizeof(struct SlabDescriptor), 32);
for(int i = 0; i < GENERIC_CACHEC; i++)
{
diff --git a/mem/zone.c b/mem/zone.c
index 489383a..42a056e 100644
--- a/mem/zone.c
+++ b/mem/zone.c
@@ -2,9 +2,11 @@
#include "memory.h"
#include "boot/boot.h"
#include "lib/string.h"
+#include "lib/jove.h"
+#include "sys/errno.h"
#include "io/log.h"
-#define MEM_ZONE_STANDARD_PAGES (MEM_ZONE_STANDARD_LIMIT >> 12)
+#define MEM_ZONE_STANDARD_PAGES (MEM_ZONE_STANDARD_LIMIT >> PAGE_SHIFT)
static uintmax_t
s_zone_standard_freemap_blocks_flat[BUDDY_BLOCKS_FOR(MEM_ZONE_STANDARD_PAGES)];
@@ -17,8 +19,6 @@ static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] =
.name = "Standard",
.base = MEM_ZONE_STANDARD_BASE,
.limit = MEM_ZONE_STANDARD_LIMIT,
- .npages_total = MEM_ZONE_STANDARD_PAGES,
- .npages_free = 0,
.freemap = {
.orders = MEM_BUDDY_ORDERS,
.bits = MEM_ZONE_STANDARD_PAGES,
@@ -36,40 +36,89 @@ static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] =
}
};
+int
+mem_zone_for(uintptr_t addr)
+{
+ addr &= ~PAGE_MASK;
+ for(size_t zonei = 0; zonei < MEM_ZONE_COUNT; zonei++)
+ {
+ struct PhysicalMemoryZone *pmz = &s_zones[zonei];
+ if(addr >= pmz->base && addr < pmz->limit) return zonei;
+ }
+ return -ENOTFOUND;
+}
+
+uintptr_t
+mem_zone_bound_lower(size_t zone)
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+ return s_zones[zone].base;
+}
+
+uintptr_t
+mem_zone_bound_upper(size_t zone)
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+ return s_zones[zone].limit;
+}
+
+size_t
+mem_zone_pages_free(size_t zone)
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+ return s_zones[zone].freemap.free;
+}
+
void
+_zone_resv(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+ buddy_mark_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+void
+_zone_free(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+ buddy_free_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+int
mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit)
{
- size_t base_off = base % PAGESIZE;
- size_t limit_off = limit % PAGESIZE;
-
- if(base_off > 0) base += (PAGESIZE - base_off);
- limit -= limit_off;
+ if(zone >= MEM_ZONE_COUNT) return -EINVAL;
- buddy_mark_range(&s_zones[zone].freemap, base >> 12, limit >> 12);
+ size_t base_off = base % PAGE_SIZE;
+
+ size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+ size_t limit_real = limit & ~PAGE_MASK;
+ _zone_resv(&s_zones[zone], base_real, limit_real);
+ return 0;
}
-void
+int
mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit)
{
- size_t base_off = base % PAGESIZE;
- size_t limit_off = limit % PAGESIZE;
-
- if(base_off > 0) base += (PAGESIZE - base_off);
- limit -= limit_off;
-
- size_t npages = (limit - base) >> 12;
- s_zones[zone].npages_free += npages;
- buddy_free_range(&s_zones[zone].freemap, base >> 12, limit >> 12);
+ if(zone >= MEM_ZONE_COUNT) return -EINVAL;
+
+ size_t base_off = base % PAGE_SIZE;
+
+ size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+ size_t limit_real = limit & ~PAGE_MASK;
+ _zone_free(&s_zones[zone], base_real, limit_real);
+ return 0;
}
uintptr_t
mem_zone_alloc(size_t zone, size_t pages)
-{
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+
struct PhysicalMemoryZone *pmz = &s_zones[zone];
intmax_t pagei = buddy_alloc(&pmz->freemap, pages);
- if(pagei < 0) return 0;
+ if(pagei < 0) {
+ return 0;
+ }
- return (((uintmax_t)pagei) << 12) + pmz->base;
+ return (((uintmax_t)pagei) << PAGE_SHIFT) + pmz->base;
}
void
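
mem_zone_alloc converts the buddy allocator's page index back to a
physical address relative to the zone base, with 0 as the failure
sentinel. A caller sketch:

    uintptr_t phys = mem_zone_alloc(MEM_ZONE_STANDARD, 4); /* 4 contiguous pages */
    if (phys == 0)
        kpanic("out of physical memory\n");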
diff --git a/mem/zone.h b/mem/zone.h
index 7e863bf..c0b0f52 100644
--- a/mem/zone.h
+++ b/mem/zone.h
@@ -22,23 +22,42 @@ struct PhysicalMemoryZone
uintptr_t base;
uintptr_t limit;
- size_t npages_total;
- size_t npages_free;
-
struct BuddyMap freemap;
};
+/**Return the zone index for the given address
+ * @param addr address to look up
+ * @return zone index, or -ENOTFOUND if no zone contains the address*/
+int mem_zone_for(uintptr_t addr);
+
+/**Return the lower bound for the given zone index.
+ * @param zone index into zones.
+ * @return lower bound.*/
+uintptr_t mem_zone_bound_lower(size_t zone);
+
+/**Return the upper bound for the given zone index.
+ * @param zone index into zones.
+ * @return upper bound.*/
+uintptr_t mem_zone_bound_upper(size_t zone);
+
+/**Return the number of pages free in the given zone.
+ * @param zone index into zones.
+ * @return number of free pages.*/
+size_t mem_zone_pages_free(size_t zone);
+
/** Using a given zone, reserve a range of physical addresses
* @param zone identifier of zone to modify
* @param base starting address to reserve
- * @param limit ending address to reserve*/
-void mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit);
+ * @param limit ending address to reserve
+ * @return 0 on success, or a negative error code */
+int mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit);
/** Using a given zone, free a range of physical addresses
* @param zone identifier of zone to modify
* @param base starting address to free
- * @param limit ending address to free*/
-void mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit);
+ * @param limit ending address to free
+ * @return 0 on success, or a negative error code*/
+int mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit);
/** Allocate a number of pages from the given zone
* @param zone identifier of the zone to modify
diff --git a/sys/errno.h b/sys/errno.h
new file mode 100644
index 0000000..8d0c980
--- /dev/null
+++ b/sys/errno.h
@@ -0,0 +1,10 @@
+#ifndef _SYS_ERRNO_H
+#define _SYS_ERRNO_H 1
+
+#define ENOPERM 1
+#define EFAULT 2
+#define EINVAL 3
+#define ENOSYS 4
+#define ENOTFOUND 5
+
+#endif
diff --git a/sys/permission.h b/sys/permission.h
new file mode 100644
index 0000000..08bb765
--- /dev/null
+++ b/sys/permission.h
@@ -0,0 +1,11 @@
+#ifndef _SYS_PERMISSION_H
+#define _SYS_PERMISSION_H 1
+
+#define PERM_MEM_PHYS_RESV 1 /* Reserve physical memory. */
+#define PERM_MEM_PHYS_FREE 2 /* Free physical memory. */
+#define PERM_MEM_PHYS_ALLOC 4 /* Allocate physical memory. */
+
+#define PERM_MEM_VIRT_PD 8 /* Work on any PD. */
+#define PERM_MEM_VIRT_MAP 0x10 /* Map physical memory to virtual memory. */
+
+#endif
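
Permissions are a per-task bitmask (the perm field of struct Task); the
syscall handlers gate on them with ENSURE_PERM in usr/syscall.c below. A
sketch, with child a hypothetical task being granted allocation rights
only:

    child->perm = PERM_MEM_PHYS_ALLOC;   /* may allocate physical pages */
    child->perm |= PERM_MEM_VIRT_MAP;    /* later: may edit its mappings */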
diff --git a/sys/syscall.h b/sys/syscall.h
new file mode 100644
index 0000000..d8b64bb
--- /dev/null
+++ b/sys/syscall.h
@@ -0,0 +1,69 @@
+#ifndef _SYS_SYSCALL_H
+#define _SYS_SYSCALL_H 1
+
+#include <stdint.h>
+#include <stddef.h>
+#include "types.h"
+
+typedef struct syscall {
+ int id;
+} syscall_t;
+
+struct syscall_log {
+ syscall_t syscall;
+ const char *message;
+};
+
+struct syscall_mem_phys_range_op {
+ syscall_t syscall;
+ uintptr_t base;
+ uintptr_t limit;
+};
+
+struct syscall_mem_phys_alloc {
+ syscall_t syscall;
+ size_t npages;
+ uintptr_t *result;
+};
+
+struct syscall_mem_virt_mapping {
+ syscall_t syscall;
+ linear_address_t addr;
+ page_mapping_t *result;
+};
+
+struct syscall_mem_virt_map {
+ syscall_t syscall;
+ linear_address_t addr;
+ page_mapping_t map;
+};
+
+struct syscall_mem_virt_alloc {
+ syscall_t syscall;
+ linear_address_t from;
+ uintptr_t to;
+ page_flags_t flg;
+};
+
+enum
+{
+ SYSCALL_LOG = 0,
+ SYSCALL_TID,
+
+ SYSCALL_MEM_PHYS_RESV,
+ SYSCALL_MEM_PHYS_FREE,
+ SYSCALL_MEM_PHYS_ALLOC,
+
+ SYSCALL_MEM_VIRT_MAPPING,
+ SYSCALL_MEM_VIRT_MAP,
+ SYSCALL_MEM_VIRT_ALLOC,
+
+ SYSCALL_COUNT
+};
+
+#define _SYSCALL(data) \
+ intmax_t ax; \
+ __asm__ volatile("movq %0, %%rdi\nsyscall": "=a"(ax): "r"(data): "rdi", "rcx", "r11", "memory"); \
+ return ax
+
+#endif
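
The wrapper functions that previously lived in abi/syscall.h are gone
from the shared header; user code builds its own on top of _SYSCALL. A
sketch in the style of the removed _syscall_log:

    #include "sys/syscall.h"

    static inline intmax_t sys_log(const char *message)
    {
        struct syscall_log req = {
            .syscall = { .id = SYSCALL_LOG },
            .message = message
        };
        _SYSCALL(&req);   /* issues the syscall and returns rax */
    }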
diff --git a/sys/types.h b/sys/types.h
new file mode 100644
index 0000000..0a519c5
--- /dev/null
+++ b/sys/types.h
@@ -0,0 +1,32 @@
+#ifndef _SYS_TYPES_H
+#define _SYS_TYPES_H 1
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+typedef intmax_t tid_t;
+
+typedef struct {
+ intmax_t tid;
+ uintmax_t addr;
+} linear_address_t;
+
+typedef uintptr_t physptr_t;
+
+typedef struct page_flags
+{
+ bool present;
+ bool writeable;
+ bool useraccess;
+ bool executable;
+} page_flags_t;
+
+typedef struct page_mapping
+{
+ physptr_t phys;
+ page_flags_t pf;
+} page_mapping_t;
+
+
+#endif
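
linear_address_t names memory in a specific task's page directory so the
virtual-memory syscalls can operate across tasks; the handlers in
usr/syscall.c treat tid -1 as "the calling task". A request sketch,
assuming sys/syscall.h is included:

    page_mapping_t mapping;
    linear_address_t la = { .tid = -1, .addr = 0x400000 }; /* caller's own PD */
    struct syscall_mem_virt_mapping req = {
        .syscall = { .id = SYSCALL_MEM_VIRT_MAPPING },
        .addr = la,
        .result = &mapping
    };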
diff --git a/usr/syscall.c b/usr/syscall.c
index 8680845..fbc5fe7 100644
--- a/usr/syscall.c
+++ b/usr/syscall.c
@@ -1,11 +1,28 @@
#include "syscall.h"
-#include "tsk/tasking.h"
+#include "sys/errno.h"
+#include "sys/permission.h"
+#include "usr/tasking.h"
#include "mem/memory.h"
#include "io/log.h"
+#define ENSURE_ADDR(ptr) \
+ if(!mem_check_ptr(ptr)) { klogf("User passed bad address %#016X\n", ptr); return -EFAULT; }
+
+#define ENSURE_PERM(p) \
+ if(!(task_current->perm & p)) return -ENOPERM
+
+#define PD_FOR_LINEAR_ADDRESS(addr) current_page_directory; \
+ if(addr.tid == -1) addr.tid = task_current->id; \
+ if(addr.tid != task_current->id) { \
+ ENSURE_PERM(PERM_MEM_VIRT_PD); \
+ struct Task *task = task_get(addr.tid); \
+ if(task == NULL) return -EFAULT; \
+ pd = task->pd; \
+ }
+
int _syscall_handler_log(struct syscall_log *req)
{
- if(!mem_check_ptr(req->message)) return -1;
+ ENSURE_ADDR(req->message);
klogf("%s", req->message);
return 0;
}
@@ -15,14 +32,73 @@ intmax_t _syscall_handler_tid(syscall_t *req)
return task_current->id;
}
+int _syscall_handler_mem_phys_resv(struct syscall_mem_phys_range_op *req)
+{
+ ENSURE_PERM(PERM_MEM_PHYS_RESV);
+ mem_phys_reserve(req->base, req->limit);
+ return 0;
+}
+
+int _syscall_handler_mem_phys_free(struct syscall_mem_phys_range_op *req)
+{
+ ENSURE_PERM(PERM_MEM_PHYS_FREE);
+ mem_phys_release(req->base, req->limit);
+ return 0;
+}
+
+int _syscall_handler_mem_phys_alloc(struct syscall_mem_phys_alloc *req)
+{
+ ENSURE_ADDR(req->result);
+ ENSURE_PERM(PERM_MEM_PHYS_ALLOC);
+ *req->result = mem_phys_alloc(req->npages);
+ return 0;
+}
+
+int _syscall_handler_mem_virt_mapping(struct syscall_mem_virt_mapping *req)
+{
+ ENSURE_ADDR(req->result);
+ ENSURE_PERM(PERM_MEM_VIRT_MAP);
+ page_directory_t *pd = PD_FOR_LINEAR_ADDRESS(req->addr);
+ *req->result = mem_get_mapping_as(pd, req->addr.addr);
+ return 0;
+}
+
+int _syscall_handler_mem_virt_map(struct syscall_mem_virt_map *req)
+{
+ ENSURE_PERM(PERM_MEM_VIRT_MAP);
+ page_directory_t *pd = PD_FOR_LINEAR_ADDRESS(req->addr);
+ mem_set_mapping_as(pd, req->map, req->addr.addr);
+ return 0;
+}
+
+int _syscall_handler_mem_virt_alloc(struct syscall_mem_virt_alloc *req)
+{
+ ENSURE_PERM(PERM_MEM_VIRT_MAP);
+ ENSURE_PERM(PERM_MEM_PHYS_ALLOC);
+ page_directory_t *pd = PD_FOR_LINEAR_ADDRESS(req->from);
+ mem_ensure_range_as(pd, req->from.addr, req->to, req->flg);
+ return 0;
+}
+
void *_syscall_handlers[SYSCALL_COUNT] = {
- _syscall_handler_log
+ _syscall_handler_log,
+ _syscall_handler_tid,
+
+ _syscall_handler_mem_phys_resv,
+ _syscall_handler_mem_phys_free,
+ _syscall_handler_mem_phys_alloc,
+
+ _syscall_handler_mem_virt_mapping,
+ _syscall_handler_mem_virt_map,
+ _syscall_handler_mem_virt_alloc,
};
int
syscall_handler(syscall_t *req)
{
- if(!mem_check_ptr(req)) return -1;
- if(req->id >= SYSCALL_COUNT) return -1;
+ ENSURE_ADDR(req);
+ if(req->id < 0 || req->id >= SYSCALL_COUNT) return -ENOSYS;
+
+ ENSURE_ADDR(_syscall_handlers[req->id]);
return ((syscall_handler_t)(_syscall_handlers[req->id]))(req);
}
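
PD_FOR_LINEAR_ADDRESS works by letting its first token complete the
caller's declaration; the statements that follow then run with pd in
scope. For reference, the line in _syscall_handler_mem_virt_map expands
to roughly:

    page_directory_t *pd = current_page_directory;
    if (req->addr.tid == -1) req->addr.tid = task_current->id;
    if (req->addr.tid != task_current->id) {
        if (!(task_current->perm & PERM_MEM_VIRT_PD)) return -ENOPERM;
        struct Task *task = task_get(req->addr.tid);
        if (task == NULL) return -EFAULT;
        pd = task->pd;   /* target task's page directory */
    }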
diff --git a/usr/syscall.h b/usr/syscall.h
index fe7843c..5cc82b8 100644
--- a/usr/syscall.h
+++ b/usr/syscall.h
@@ -1,7 +1,7 @@
#ifndef JOVE_USER_SYSCALL_H
#define JOVE_USER_SYSCALL_H 1
-#include "abi/syscall.h"
+#include "sys/syscall.h"
typedef int (*syscall_handler_t)(syscall_t*);
diff --git a/tsk/thread.c b/usr/tasking.c
index cb4df25..cb4df25 100644
--- a/tsk/thread.c
+++ b/usr/tasking.c
diff --git a/tsk/tasking.h b/usr/tasking.h
index b322682..4b11999 100644
--- a/tsk/tasking.h
+++ b/usr/tasking.h
@@ -3,8 +3,8 @@
#include <stddef.h>
#include <stdint.h>
-
-typedef size_t tid_t;
+#include "sys/types.h"
+#include "mem/memory.h"
struct Task
{
@@ -12,6 +12,8 @@ struct Task
tid_t id;
uintptr_t kbp;
size_t perm;
+
+ page_directory_t *pd;
};
extern struct Task *task_current;
@@ -21,7 +23,6 @@ void tasking_setup(void);
struct Task *task_new(struct Task *parent);
struct Task *task_get(tid_t id);
-void *task_get_pd(struct Task *task);
void task_perm_release(struct Task *task, size_t mask);
#endif
diff --git a/usr/umode.c b/usr/umode.c
index 4ef5306..1105d9e 100644
--- a/usr/umode.c
+++ b/usr/umode.c
@@ -24,7 +24,15 @@ umode_setup(void)
kpanic("Init file %s is incorrectly formatted (want ELF64)\n", init_path);
void *user_stack = (void*)(0x00007FFFFFFFFFFF);
- mem_ensure_range((uintptr_t)user_stack & ~0xFFF, (uintptr_t)user_stack, true, true);
+ mem_ensure_range(
+ (uintptr_t)user_stack & ~0xFFF,
+ (uintptr_t)user_stack,
+ (page_flags_t) {
+ .present = true,
+ .writeable = true,
+ .useraccess = true,
+ .executable = false
+ });
klogf("User entry point %#016X\n", entry_point);
umode_enter(entry_point, user_stack);