path: root/mem/zone.c
blob: 489383a08857aa895fa79c67d3c7d9f32c593b89
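
/*
 * mem/zone.c - physical memory zones backed by buddy-bitmap freemaps.
 *
 * The Standard zone is given static freemap storage below and is populated
 * from the boot memory map in mem_zone_setup_standard(); the Higher zone is
 * declared but has no freemap storage wired up in this file.
 */
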
#include "zone.h"
#include "memory.h"
#include "boot/boot.h"
#include "lib/string.h"
#include "io/log.h"

/* Size of the Standard zone expressed in 4 KiB pages. */
#define MEM_ZONE_STANDARD_PAGES  (MEM_ZONE_STANDARD_LIMIT >> 12)

/* Backing storage for the Standard zone's buddy freemap: a per-order pointer
 * table plus one flat block array that mem_zone_setup_standard() carves into
 * a bitmap layer per buddy order. */
static uintmax_t
    s_zone_standard_freemap_blocks_flat[BUDDY_BLOCKS_FOR(MEM_ZONE_STANDARD_PAGES)];
static uintmax_t*
    s_zone_standard_freemap_blocks[MEM_BUDDY_ORDERS];

/* Physical memory zone descriptors. Only the Standard zone is given freemap
 * storage here; the Higher zone has none attached in this file. */
static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] =
{
    {
        .name = "Standard",
        .base = MEM_ZONE_STANDARD_BASE,
        .limit = MEM_ZONE_STANDARD_LIMIT,
        .npages_total = MEM_ZONE_STANDARD_PAGES,
        .npages_free = 0,
        .freemap = { 
            .orders = MEM_BUDDY_ORDERS,
            .bits = MEM_ZONE_STANDARD_PAGES,
            .free = 0,
            .blocks = s_zone_standard_freemap_blocks
        }
    },
    {
        .name = "Higher",
        .base = MEM_ZONE_HIGHER_BASE,
        .limit = -1,
        .freemap = {
            .orders = MEM_BUDDY_ORDERS
        }
    }
};

/* Mark the whole pages of [base, limit) as reserved in the given zone's
 * freemap; the range is rounded inward to page boundaries first. */
void
mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit)
{
    size_t base_off = base % PAGESIZE;
    size_t limit_off = limit % PAGESIZE;
    
    if(base_off > 0) base += (PAGESIZE - base_off);
    limit -= limit_off;

    buddy_mark_range(&s_zones[zone].freemap, base >> 12, limit >> 12);
}

/* Release the whole pages of [base, limit) to the given zone and update its
 * free-page count; the range is rounded inward to page boundaries first. */
void
mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit)
{
    size_t base_off = base % PAGESIZE;
    size_t limit_off = limit % PAGESIZE;
    
    if(base_off > 0) base += (PAGESIZE - base_off);
    limit -= limit_off;

    size_t npages = (limit - base) >> 12;
    s_zones[zone].npages_free += npages;
    buddy_free_range(&s_zones[zone].freemap, base >> 12, limit >> 12);
}

/* Allocate a contiguous run of `pages` pages from the given zone. Returns the
 * physical address of the first page, or 0 if the request cannot be met. */
uintptr_t
mem_zone_alloc(size_t zone, size_t pages)
{
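    /* buddy_alloc() returns a negative value on failure; otherwise the page
     * index is converted back into a physical address within the zone. */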
    struct PhysicalMemoryZone *pmz = &s_zones[zone];
    intmax_t pagei = buddy_alloc(&pmz->freemap, pages);
    if(pagei < 0) return 0;

    return (((uintmax_t)pagei) << 12) + pmz->base;
}

/* Initialise the Standard zone: lay out its per-order freemap layers over the
 * static backing storage, then populate it from the boot memory map. */
void
mem_zone_setup_standard(void)
{
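    /* Carve the flat block array into one bitmap layer per buddy order and
     * fill each layer with ones; usable ranges from the boot memory map are
     * then released into the zone below. */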
    struct PhysicalMemoryZone *standard_zone = &s_zones[MEM_ZONE_STANDARD];
    uintmax_t *map_block_layer_base = s_zone_standard_freemap_blocks_flat;
    for(size_t i = 0; i < MEM_BUDDY_ORDERS; i++) {
        size_t layer_entries = (standard_zone->freemap.bits / BUDDY_BLOCK_BITS) >> i;
        standard_zone->freemap.blocks[i] = map_block_layer_base;
        memset(map_block_layer_base, 0xFF, layer_entries * sizeof(uintmax_t));
        map_block_layer_base = &map_block_layer_base[layer_entries];
    }

    for(int i = 0; i < boot_memorymap.count; i++) {
        struct MemoryMapEntry *entry = &boot_memorymap.entries[i];
        klogf("%2i\t%#016X -> %#016X (%i)\n",
                i, entry->base, entry->base + entry->length, entry->usable);
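        /* Ignore entries entirely above the Standard zone and clamp entries
         * that straddle its limit before handing usable memory to the zone. */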
        if(entry->base > MEM_ZONE_STANDARD_LIMIT) continue;
        size_t limit = entry->base + entry->length;
        if(limit > MEM_ZONE_STANDARD_LIMIT) limit = MEM_ZONE_STANDARD_LIMIT;
        if(entry->usable)
            mem_zone_free(MEM_ZONE_STANDARD, entry->base, limit);
    }
}