Diffstat (limited to 'mem')
-rw-r--r--	mem/memory.h	72
-rw-r--r--	mem/phys.c	34
-rw-r--r--	mem/slab.c	18
-rw-r--r--	mem/zone.c	93
-rw-r--r--	mem/zone.h	33
5 files changed, 195 insertions, 55 deletions
diff --git a/mem/memory.h b/mem/memory.h
index 41323ed..251c6f5 100644
--- a/mem/memory.h
+++ b/mem/memory.h
@@ -1,36 +1,61 @@
 #ifndef JOVE_MEM_H
 #define JOVE_MEM_H 1
 
-#define PAGESIZE 4096ULL
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (PAGE_SIZE - 1)
+
 #define KiB 1024ULL
 #define MiB (KiB * KiB)
 #define GiB (MiB * KiB)
 #define TiB (GiB * KiB)
 
-#include <stddef.h>
-#include <stdint.h>
-#include <stdbool.h>
-typedef uintptr_t physptr_t;
+#include "lib/spinlock.h"
+#include "sys/types.h"
+
+typedef struct page_directory
+{
+    spinlock_t lock;
+    size_t id;
+    size_t ref;
+    physptr_t phys;
+    void *virt;
+} page_directory_t;
 
 #include "slab.h"
 
-/*Linear*/
-void mem_paging_setup(void);
+extern page_directory_t *current_page_directory;
 
-physptr_t mem_linear_tophys(uintptr_t virt);
+/**Setup the kernel structures responsible for handling physical memory translation.*/
+void mem_paging_setup(void);
 
-/**Check if pointer is within valid memory.
+/**Check if a given pointer is valid.
  * @param ptr pointer to check.
- * @return if the pointer is invalid.*/
+ * @return if the pointer is valid.*/
 bool mem_check_ptr(const void *ptr);
 
-/**Make sure the range indicated is available in memory.
- * If necessary, allocate new pages using the passed flags
- * @param from start of the range.
- * @param to end of the range.
- * @param rw flag to mark page is writeable.
- * @param user flag to mark page as user accessable*/
-void mem_ensure_range(uintptr_t from, uintptr_t to, bool rw, bool user);
+/** Return the physical memory mapping for the given address.
+ * @param pd page directory to get mapping in.
+ * @param addr address to get mapping for.
+ * @return HAL compliant page mapping.*/
+page_mapping_t mem_get_mapping_as(page_directory_t *pd, uintptr_t addr);
+
+/** Return the physical memory mapping for the given address.
+ * @param addr address to get mapping for.
+ * @return HAL compliant page mapping.*/
+page_mapping_t mem_get_mapping(uintptr_t addr);
+
+/** Map a page mapping to a given virtual address.
+ * @param pd pointer to the page directory to edit.
+ * @param mapping mapping to apply.
+ * @param virt virtual address to map to. */
+void mem_set_mapping_as(page_directory_t *pd, page_mapping_t mapping, uintptr_t virt);
+
+/** Map a page mapping to a given virtual address.
+ * @param pd pointer to the page directory to edit.
+ * @param mapping mapping to apply.
+ * @param virt virtual address to map to. */
+void mem_set_mapping(page_mapping_t mapping, uintptr_t virt);
 
 /** Make sure the range indicated is available in memory for specified pd
  * If necessary, allocate new pages using the passed flags
@@ -39,7 +64,15 @@ void mem_ensure_range(uintptr_t from, uintptr_t to, bool rw, bool user);
  * @param to end of the range
  * @param rw flag to mark page is writeable
  * @param user flag to mark page as user accessable*/
-void mem_ensure_range_for(void *pd, uintptr_t from, uintptr_t to, bool rw, bool user);
+void mem_ensure_range_as(page_directory_t *pd, uintptr_t from, uintptr_t to, page_flags_t flg);
+
+/**Make sure the range indicated is available in memory
+ * If necessary, allocate new pages using the passed flags
+ * @param from start of the range.
+ * @param to end of the range.
+ * @param rw flag to mark page is writeable.
+ * @param user flag to mark page as user accessable*/
+void mem_ensure_range(uintptr_t from, uintptr_t to, page_flags_t flg);
 
 void mem_slab_setup(void);
 void mem_slabcache_new(struct SlabCache *cache, char *name, size_t objsize);
@@ -53,7 +86,8 @@ void mem_free(void *ptr);
 
 /*Physical*/
 physptr_t mem_phys_alloc(size_t pages);
-void mem_phys_reserve(physptr_t start, size_t len);
+void mem_phys_reserve(physptr_t start, physptr_t end);
+void mem_phys_release(physptr_t start, physptr_t end);
 
 void mem_setup(void);
 
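Editor's note: a minimal caller sketch (not part of the commit) exercising the new page_flags_t interface. The flag field names are taken from the slab.c hunk below; the function name and base address are hypothetical.

    #include "mem/memory.h"

    static void example_map_scratch(void)
    {
        uintptr_t base = 0xFFFFFF0000000000ULL;   /* hypothetical kernel virtual address */

        /* Back four pages with writable, kernel-only, non-executable memory. */
        mem_ensure_range(base, base + 4 * PAGE_SIZE,
                         (page_flags_t) {
                             .present = true,
                             .writeable = true,
                             .useraccess = false,
                             .executable = false
                         });

        /* Mappings can now be queried through the new accessor. */
        page_mapping_t m = mem_get_mapping(base);
        (void)m;                                  /* page_mapping_t is HAL-defined */
    }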
diff --git a/mem/phys.c b/mem/phys.c
--- a/mem/phys.c
+++ b/mem/phys.c
@@ -1,9 +1,39 @@
 #include "memory.h"
 #include "zone.h"
+#include "lib/jove.h"
+
+void
+mem_phys_reserve(physptr_t start, physptr_t end)
+{
+    size_t zone = mem_zone_for(start);
+    size_t limit = mem_zone_bound_upper(zone);
+
+    if(end > limit) {
+        mem_phys_reserve(limit, end);
+        end = limit;
+    }
+    mem_zone_resv(MEM_ZONE_STANDARD, start, end);
+}
+
+void
+mem_phys_release(physptr_t start, physptr_t end)
+{
+    size_t zone = mem_zone_for(start);
+    size_t limit = mem_zone_bound_upper(zone);
+
+    if(end > limit) {
+        mem_phys_release(limit, end);
+        end = limit;
+    }
+    mem_zone_free(MEM_ZONE_STANDARD, start, end);
+}
 
 physptr_t
 mem_phys_alloc(size_t pages)
 {
-    physptr_t ptr = mem_zone_alloc(MEM_ZONE_STANDARD, pages);
-    return ptr;
+    if(mem_zone_pages_free(MEM_ZONE_HIGHER) >= pages)
+        return mem_zone_alloc(MEM_ZONE_HIGHER, pages);
+    if(mem_zone_pages_free(MEM_ZONE_STANDARD) >= pages)
+        return mem_zone_alloc(MEM_ZONE_STANDARD, pages);
+    kpanic("Kernel ran out of physical memory!\n");
 }
diff --git a/mem/slab.c b/mem/slab.c
--- a/mem/slab.c
+++ b/mem/slab.c
@@ -17,7 +17,15 @@ s_next_free(size_t width)
 {
     uintptr_t ret = s_addr_next_free;
     s_addr_next_free += width;
-    mem_ensure_range(ret, s_addr_next_free, true, false);
+    mem_ensure_range(
+            ret,
+            s_addr_next_free,
+            (page_flags_t) {
+                .present = true,
+                .writeable = true,
+                .useraccess = false,
+                .executable = false
+            });
     return ret;
 }
@@ -36,7 +44,7 @@ s_get_free_listw(size_t slabw, size_t objw)
 static struct SlabDescriptor
 *s_slab_new(struct SlabCache *cache, struct SlabDescriptor *last)
 {
-    size_t slab_width = (cache->slab_pages * PAGESIZE);
+    size_t slab_width = (cache->slab_pages << PAGE_SHIFT);
     uintptr_t descr_base = s_next_free(slab_width);
     struct SlabDescriptor *descr = (struct SlabDescriptor*)descr_base;
@@ -122,7 +130,7 @@ mem_slab_free(struct SlabCache *cache, void *ptr)
     {
         uintptr_t base = (uintptr_t)slab->obj_base;
         uintptr_t limit = ((uintptr_t)slab->slab_base)
-                          + (cache->slab_pages * PAGESIZE);
+                          + (cache->slab_pages << PAGE_SHIFT);
         if(addr > limit || addr < base) continue;
         if((addr - base) % cache->obj_size != 0) {
             klogf("Tried to free offset pointer %#016X in slab %s\n",
@@ -142,7 +150,7 @@ mem_slab_free(struct SlabCache *cache, void *ptr)
     {
         uintptr_t base = (uintptr_t)slab->obj_base;
         uintptr_t limit = ((uintptr_t)slab->slab_base)
-                          + (cache->slab_pages * PAGESIZE);
+                          + (cache->slab_pages << PAGE_SHIFT);
         if(addr > limit || addr < base) continue;
         if((addr - base) % cache->obj_size != 0) {
             klogf("Tried to free offset pointer %#016X in slab %s\n",
@@ -189,7 +197,7 @@ mem_slab_setup(void)
 {
     s_addr_next_free = (uintptr_t)&_kernel_end;
     s_addr_next_free = ((s_addr_next_free >> 12) + 1) << 12;
-    s_get_free_listw(PAGESIZE - sizeof(struct SlabDescriptor), 32);
+    s_get_free_listw(PAGE_SIZE - sizeof(struct SlabDescriptor), 32);
 
     for(int i = 0; i < GENERIC_CACHEC; i++)
     {
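Editor's sketch of boot-time usage for the new reserve/release pair, again not from the commit: a bootloader-reported region is released wholesale and the kernel image carved back out. _kernel_end is the linker symbol slab.c already uses; _kernel_start is an assumed companion symbol.

    extern char _kernel_start, _kernel_end;   /* _kernel_start is assumed */

    static void example_claim_bootmem(physptr_t region_base, physptr_t region_end)
    {
        /* Mark the whole region free, then re-reserve the kernel image.
         * Both calls split internally at zone boundaries via recursion. */
        mem_phys_release(region_base, region_end);
        mem_phys_reserve((physptr_t)(uintptr_t)&_kernel_start,
                         (physptr_t)(uintptr_t)&_kernel_end);
    }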
diff --git a/mem/zone.c b/mem/zone.c
--- a/mem/zone.c
+++ b/mem/zone.c
@@ -2,9 +2,11 @@
 #include "memory.h"
 #include "boot/boot.h"
 #include "lib/string.h"
+#include "lib/jove.h"
+#include "sys/errno.h"
 #include "io/log.h"
 
-#define MEM_ZONE_STANDARD_PAGES (MEM_ZONE_STANDARD_LIMIT >> 12)
+#define MEM_ZONE_STANDARD_PAGES (MEM_ZONE_STANDARD_LIMIT >> PAGE_SHIFT)
 
 static uintmax_t s_zone_standard_freemap_blocks_flat[BUDDY_BLOCKS_FOR(MEM_ZONE_STANDARD_PAGES)];
 
@@ -17,8 +19,6 @@ static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] =
         .name = "Standard",
         .base = MEM_ZONE_STANDARD_BASE,
         .limit = MEM_ZONE_STANDARD_LIMIT,
-        .npages_total = MEM_ZONE_STANDARD_PAGES,
-        .npages_free = 0,
         .freemap = {
             .orders = MEM_BUDDY_ORDERS,
             .bits = MEM_ZONE_STANDARD_PAGES,
@@ -36,40 +36,89 @@ static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] =
     }
 };
 
+int
+mem_zone_for(uintptr_t addr)
+{
+    addr &= ~PAGE_MASK;
+    for(size_t zonei = 0; zonei < MEM_ZONE_COUNT; zonei++)
+    {
+        struct PhysicalMemoryZone *pmz = &s_zones[zonei];
+        if(addr >= pmz->base && addr < pmz->limit) return zonei;
+    }
+    return -ENOTFOUND;
+}
+
+uintptr_t
+mem_zone_bound_lower(size_t zone)
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+    return s_zones[zone].base;
+}
+
+uintptr_t
+mem_zone_bound_upper(size_t zone)
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+    return s_zones[zone].limit;
+}
+
+size_t
+mem_zone_pages_free(size_t zone)
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+    return s_zones[zone].freemap.free;
+}
+
 void
+_zone_resv(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+    buddy_mark_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+void
+_zone_free(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+    buddy_free_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+int
 mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit)
 {
-    size_t base_off = base % PAGESIZE;
-    size_t limit_off = limit % PAGESIZE;
-
-    if(base_off > 0) base += (PAGESIZE - base_off);
-    limit -= limit_off;
+    if(zone >= MEM_ZONE_COUNT) return -EINVAL;
 
-    buddy_mark_range(&s_zones[zone].freemap, base >> 12, limit >> 12);
+    size_t base_off = base % PAGE_SIZE;
+
+    size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+    size_t limit_real = limit & ~PAGE_MASK;
+    _zone_resv(&s_zones[zone], base_real, limit_real);
+    return 0;
 }
 
-void
+int
 mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit)
 {
-    size_t base_off = base % PAGESIZE;
-    size_t limit_off = limit % PAGESIZE;
-
-    if(base_off > 0) base += (PAGESIZE - base_off);
-    limit -= limit_off;
-
-    size_t npages = (limit - base) >> 12;
-    s_zones[zone].npages_free += npages;
-    buddy_free_range(&s_zones[zone].freemap, base >> 12, limit >> 12);
+    if(zone >= MEM_ZONE_COUNT) return -EINVAL;
+
+    size_t base_off = base % PAGE_SIZE;
+
+    size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+    size_t limit_real = limit & ~PAGE_MASK;
+    _zone_free(&s_zones[zone], base_real, limit_real);
+    return 0;
 }
 
 uintptr_t
 mem_zone_alloc(size_t zone, size_t pages)
-{
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+
     struct PhysicalMemoryZone *pmz = &s_zones[zone];
 
     intmax_t pagei = buddy_alloc(&pmz->freemap, pages);
-    if(pagei < 0) return 0;
+    if(pagei < 0) {
+        return 0;
+    }
 
-    return (((uintmax_t)pagei) << 12) + pmz->base;
+    return (((uintmax_t)pagei) << PAGE_SHIFT) + pmz->base;
 }
 
 void
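Editor's worked example of the rounding now shared by mem_zone_resv() and mem_zone_free(): an unaligned range shrinks inward so only whole pages are touched. The addresses are hypothetical; the expressions mirror the hunk above.

    #include "mem/memory.h"

    static void example_rounding(void)
    {
        uintptr_t base  = 0x1234;                           /* unaligned start */
        uintptr_t limit = 0x5678;                           /* unaligned end   */
        size_t base_off = base % PAGE_SIZE;                 /* 0x234           */

        uintptr_t base_real  = (base & ~PAGE_MASK)
                             + (base_off > 0 ? PAGE_SIZE : 0);  /* 0x2000 */
        uintptr_t limit_real = limit & ~PAGE_MASK;              /* 0x5000 */

        /* The buddy map then sees pages 2..4 (0x2000-0x4FFF); the
         * partial pages at both ends are left untouched. */
        (void)base_real; (void)limit_real;
    }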
diff --git a/mem/zone.h b/mem/zone.h
--- a/mem/zone.h
+++ b/mem/zone.h
@@ -22,23 +22,42 @@ struct PhysicalMemoryZone
     uintptr_t base;
     uintptr_t limit;
 
-    size_t npages_total;
-    size_t npages_free;
-
     struct BuddyMap freemap;
 };
 
+/**Return the zone index for the given address
+ * @param addr address to look up
+ * @return zone index*/
+int mem_zone_for(uintptr_t addr);
+
+/**Return the lower bound for the given zone index.
+ * @param zone index into zones.
+ * @return lower bound.*/
+uintptr_t mem_zone_bound_lower(size_t zone);
+
+/**Return the upper bound for the given zone index.
+ * @param zone index into zones.
+ * @return upper bound.*/
+uintptr_t mem_zone_bound_upper(size_t zone);
+
+/**Return the number of pages free in the given zone.
+ * @param zone index into zones.
+ * @return number of free pages.*/
+size_t mem_zone_pages_free(size_t zone);
+
 /** Using a given zone, reserve a range of physical addresses
  * @param zone identifier of zone to modify
  * @param base starting address to reserve
- * @param limit ending address to reserve*/
-void mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit);
+ * @param limit ending address to reserve
+ * @return error code or 0 if success */
+int mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit);
 
 /** Using a given zone, free a range of physical addresses
  * @param zone identifier of zone to modify
  * @param base starting address to free
- * @param limit ending address to free*/
-void mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit);
+ * @param limit ending address to free
+ * @return error code or 0 if success*/
+int mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit);
 
 /** Allocate a number of pages from the given zone
  * @param zone identifier of the zone to modify
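Editor's sketch of a caller consuming the new int return values declared here; the wrapper function is hypothetical and not part of the commit.

    #include "mem/zone.h"

    /* Reserve one page at addr, in whichever zone it lands. */
    static int example_resv_page(uintptr_t addr)
    {
        int zone = mem_zone_for(addr);   /* -ENOTFOUND if no zone covers addr */
        if(zone < 0)
            return zone;

        /* 0 on success, -EINVAL if the zone index were out of range. */
        return mem_zone_resv((size_t)zone, addr, addr + PAGE_SIZE);
    }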