summaryrefslogtreecommitdiffstats
path: root/lib/libjove/arch/x86_64/pager/ensure.c
blob: 874db3503ebc7d5aa1cfa0ba545b8a798b46efad (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
#include "arch/x86_64/pager.h"
#include "object-untyped.h"
#include "jove.h"
#include <stdbool.h>

/* The task's root page map object. Defined here; presumably initialized by
 * the runtime before the pager is used — TODO confirm against startup code. */
KernelObjectPageMap __jove_pagemap;
/* Per-depth table-index cache for depths 0..2 (PML4/PDPT/PD). An entry of
 * (unsigned)-1 means "invalid/never seen"; a cached index means that table
 * entry was observed (or made) to exist on the last call. Shared by
 * jove_pager_ensure_for() and jove_pager_exists_for(). */
static unsigned d_cache[3] = { -1, -1, -1 };
/* Last page-aligned virtual address ensured at depth 3 (leaf). */
static uintptr_t vptr_cache = 0;
/* Scratch untyped object: receives the 4 KiB chunk produced by
 * pager_alloc_page_untyped() and is consumed by jove_pagemap_map(). */
static KernelObjectUntyped work_untyped;

/* Bit position of the level-d index within a virtual address:
 * d=0 -> 39 (PML4), d=1 -> 30 (PDPT), d=2 -> 21 (PD), d=3 -> 12 (PT). */
#define PMLI_SHL(d) (((4 - (d)) * 9) + 3)
/* Extract the 9-bit level-d table index from virtual address v. */
#define PMLI_DL(v, d) (((v) >> PMLI_SHL(d)) % 512)

/*
 * Pack the page-table indices of vptr for levels 0..depth (PML4 first)
 * into one uint64_t, one 16-bit lane per level: level i occupies bits
 * [16*i, 16*i+15]. On this little-endian x86_64 target that matches the
 * layout callers obtain by viewing the result as uint16_t[4].
 *
 * Returns (uint64_t)-1 if depth is out of range (>= 4). Built with
 * explicit shifts rather than writing through a uint16_t* so no object
 * is accessed through an incompatible pointer type (strict aliasing).
 */
static uint64_t
pager_write_path(uintptr_t vptr, uint8_t depth)
{
    if(depth >= 4) return (uint64_t)-1; /* sentinel: invalid depth */

    uint64_t r = 0;
    for(uint8_t i = 0; i <= depth; i++)
        r |= (uint64_t)PMLI_DL(vptr, i) << (16 * i);
    return r;
}

/* Produce a 4 KiB untyped object in work_untyped for the pager to map.
 * Strategy: take the last member of the task's untyped directory; if it is
 * already exactly 4 KiB, hand it over whole, otherwise split a 4 KiB chunk
 * off it into the work slot. Returns EJOVE_OK or the first error seen. */
static JoveError
pager_alloc_page_untyped(void)
{
    /* Index of the last (presumably largest/most recent — TODO confirm)
     * untyped object in the directory; reports failure via jove_errno. */
    uint8_t lastmemb = jove_objdir_lastmemb(&__jove_untyped_directory);
    if(jove_errno) {
        /* Capture and clear jove_errno so the caller sees a clean state. */
        JoveError err = jove_errno;
        jove_errno = EJOVE_OK;
        return err;
    }
    
    KernelObjectUntyped last_untyped;
    _jove_alloc_untyped_inplace(&last_untyped, &__jove_untyped_directory, lastmemb);
    
    if(jove_untyped_size(&last_untyped) == 0x1000) {
        /* Exactly one page: move it into the work slot wholesale instead of
         * splitting. NOTE(review): this path does not set work_untyped.typed
         * the way the split path does — verify jove_objdir_move covers it. */
        jove_objdir_move(&__jove_untyped_directory, lastmemb, &__rootdir, __jove_work_obj.membi);
        return EJOVE_OK;
    }

    /* Split a 4 KiB chunk off the larger untyped into work_untyped, which
     * is bound to the well-known work-object slot first. */
    work_untyped.typed = __jove_work_obj;
    return jove_untyped_split_inplace(&last_untyped, 0x1000, &work_untyped);
}

/* Make sure the page-table entry addressed by path[0..depth] is mapped,
 * allocating a fresh 4 KiB untyped for it when absent.
 * Returns EJOVE_OK on success (including the already-mapped case). */
static JoveError
pager_ensure_at_depth(KernelObjectPageMap *map, uint8_t depth, uint16_t *path)
{
    /* Already present — nothing to allocate or map. */
    if(jove_pagemap_exists(map, depth, path))
        return EJOVE_OK;

    JoveError err = pager_alloc_page_untyped();
    if(err != EJOVE_OK)
        return err;

    /* work_untyped now holds the freshly carved page; map it in. */
    return jove_pagemap_map(map, depth, path, &work_untyped);
}

/* Ensure the whole 4-level translation path for vptr exists in map,
 * creating intermediate tables (and the leaf page) as needed.
 *
 * Caching: d_cache[d] remembers the last index ensured at depth d, and
 * vptr_cache the last leaf address. A cached index at depth d is only
 * trustworthy while every shallower level also hit the cache — a stale
 * deeper entry under a *different* PML4/PDPT index must not be skipped.
 * The `fresh` flag enforces that: once any level misses, all deeper
 * levels are re-ensured and their cache entries rewritten.
 *
 * Returns EJOVE_OK, EJOVE_BADARG for an unencodable path, or the first
 * allocation/mapping error. */
JoveError
jove_pager_ensure_for(KernelObjectPageMap *map, uintptr_t vptr)
{
    vptr &= ~0xFFFULL; /* page-align: the low 12 bits are the page offset */

    uint64_t path = pager_write_path(vptr, 3);
    if(path == (uint64_t)-1) return EJOVE_BADARG;

    /* Unpack the per-level indices with shifts (lane i = bits 16*i..16*i+15)
     * instead of aliasing the uint64_t through a uint16_t*. */
    uint16_t path_seg[4];
    for(int i = 0; i < 4; i++)
        path_seg[i] = (uint16_t)(path >> (16 * i));

    /* Debug trace of the allocation path. */
    jove_kprintf("%p Alloc %p : %x:%x:%x:%x\n", map, vptr, path_seg[0], path_seg[1], path_seg[2], path_seg[3]);

    bool fresh = false; /* true once any level missed the cache */
    for(uint8_t d = 0; d < 3; d++) {
        if(fresh || path_seg[d] != d_cache[d]) {
            JoveError err = pager_ensure_at_depth(map, d, path_seg);
            if(err) return err;
            d_cache[d] = path_seg[d];
            fresh = true;
        }
    }

    /* Leaf level: keyed on the full page address, not a 9-bit index. */
    if(fresh || vptr != vptr_cache) {
        JoveError err = pager_ensure_at_depth(map, 3, path_seg);
        if(err) return err;
        vptr_cache = vptr; /* previously never updated — cache was dead */
    }
    return EJOVE_OK;
}

/* Report whether the full translation path for vptr is present in map
 * (1 = every level including the leaf exists, 0 = something is missing).
 *
 * Shares d_cache with jove_pager_ensure_for(). As there, a cache hit at
 * depth d is only valid while all shallower levels also hit: once any
 * level misses, every deeper level is queried directly (`fresh` flag),
 * preventing stale-index false positives under a different parent table. */
int
jove_pager_exists_for(KernelObjectPageMap *map, uintptr_t vptr)
{
    vptr &= ~0xFFFULL; /* page-align */

    uint64_t path = pager_write_path(vptr, 3);
    /* Unreachable with the literal depth above; report "not mapped" rather
     * than a nonzero error code that callers would read as "exists". */
    if(path == (uint64_t)-1) return 0;

    /* Unpack per-level indices via shifts (no uint16_t* aliasing). */
    uint16_t path_seg[4];
    for(int i = 0; i < 4; i++)
        path_seg[i] = (uint16_t)(path >> (16 * i));

    bool fresh = false; /* true once any level missed the cache */
    for(uint8_t d = 0; d < 3; d++) {
        if(fresh || path_seg[d] != d_cache[d]) {
            if(!jove_pagemap_exists(map, d, path_seg)) return 0;
            d_cache[d] = path_seg[d];
            fresh = true;
        }
    }
    /* Leaf entries are not cached here; always ask the kernel. */
    return jove_pagemap_exists(map, 3, path_seg);
}