/*
 * Copyright (c) Jon Santmyer.
 * This source file is released under the LGPL Version 3 as detailed in
 * the LICENSE file provided in the following repository:
 * https://git.jonsantmyer.com/heap/
 * */

#ifndef _MALLOC_H
#define _MALLOC_H 1

/* A bucket-based malloc implementation for 2^n sized objects.
 * Allocates memory at the program break for use in dynamic allocations,
 * called "objects". Each object is preceded by its metadata, which stores a
 * checksum, the object's width, and a bit marking it as taken.
 *
 * Each alloc-able region is preceded by a "bucket", which stores the bucket's
 * state: a checksum, the region's limit, the first free object, and the next
 * bucket in the chain.
 *
 * Buckets are allocated on demand; the width of a new bucket is determined by
 * the object that needs it.
 * Buckets are always page aligned, and their size is a multiple of the page
 * width.
 *
 * Dependencies:
 *  stddef.h
 *  stdint.h
 *  errno.h
 *  assert.h
 *      assert
 *  string.h
 *      memset
 *      memcpy
 *  unistd.h
 *      sbrk
 *      brk
 *
 * with MALLOC_DBG:
 *  stdio.h
 *      printf */
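
/* Rough layout of one bucket (illustrative sketch, not to scale):
 *
 *   page boundary
 *   +----------+--------------+--------+--------------+--------+-----+
 *   | bucket_t | bucket_obj_t | data   | bucket_obj_t | data   | ... |
 *   | metadata | metadata     |        | metadata     |        |     |
 *   +----------+--------------+--------+--------------+--------+-----+
 *
 * Walking a bucket starts at its objs[] member and repeatedly steps over one
 * metadata header plus OBJ_WIDTH(obj) bytes of data (see OBJ_AFTER below).*/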

#include <stddef.h>
#include <stdint.h>

#define DBGPRINT(...)
#ifdef MALLOC_DBG
#include <stdio.h>
#undef DBGPRINT
#define DBGPRINT(...) printf(__VA_ARGS__)
#endif

/* Object and bucket checksums are a 64-bit value containing the string
 * "BUCKET\0\0".*/
#define OBJ_CHECKSUM \
    (((uintmax_t)'B') | \
    ((uintmax_t)'U' << 8) | \
    ((uintmax_t)'C' << 16) | \
    ((uintmax_t)'K' << 24) | \
    ((uintmax_t)'E' << 32) | \
    ((uintmax_t)'T' << 40))

/* For an object to be valid, the object must have the correct checksum.*/
#define OBJ_VALID(obj) (obj->checksum == OBJ_CHECKSUM)
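
/* Illustration: the constant evaluates to 0x54454B435542, i.e. the bytes
 * 'B' 'U' 'C' 'K' 'E' 'T' 0 0 when stored little-endian, so on such targets
 * (with an 8-byte uintmax_t) memcmp(&obj->checksum, "BUCKET\0\0", 8) == 0
 * holds for any valid object.*/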

/* Object widths are always even, so the lowest bit of size_taken can be
 * reserved for the taken flag.*/
#define OBJ_WIDTH(obj) (obj->size_taken & ~1)
#define OBJ_TAKEN(obj) (obj->size_taken & 1)
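
/* Example encoding: a free 32-byte object stores size_taken == 32 (0x20);
 * once taken the low bit is set and size_taken == 33 (0x21). In both cases
 * OBJ_WIDTH(obj) == 32, while OBJ_TAKEN(obj) is 0 or 1 respectively.*/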

/* Gets the object struct after this object.
 * Since objects are allocated in-place, we can get the next object using
 * the object's size and its pointer to the start of data.
 *
 * The same is true for getting the metadata for any given data pointer:
 * take the pointer and subtract the size of the object struct from it,
 * since the metadata sits directly before the data.*/
#define OBJ_AFTER(obj) \
    ((bucket_obj_t*)(((uintptr_t)obj->data) + OBJ_WIDTH(obj)))
#define OBJ_FROM_PTR(ptr) ((bucket_obj_t*)((uintptr_t)ptr - sizeof(bucket_obj_t)))
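
/* Iteration sketch built on these macros (assumes "limit" holds the first
 * address past the bucket's last object; error checks omitted):
 *
 *   bucket_obj_t *obj = (bucket_obj_t*)bucket->objs;
 *   while ((uintptr_t)obj < bucket->limit) {
 *       DBGPRINT("%p: %zu bytes, %staken\n", (void*)obj->data,
 *                OBJ_WIDTH(obj), OBJ_TAKEN(obj) ? "" : "not ");
 *       obj = OBJ_AFTER(obj);
 *   }
 * */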

typedef struct bucket_obj
{
    uintmax_t checksum;
    size_t size_taken;
    char data[];
} bucket_obj_t;

#define LOG2(n) ((31) - __builtin_clz(n))
/* Buckets are always aligned to the page boundary for cache friendliness.*/
#define BUCKET_ALIGN (4096)
/* Buckets should always be a multiple of the page width, expanding with the
 * initial object size.*/
#define BUCKET_WIDTH(w) \
    (1UL << (12 + (w < BUCKET_ALIGN ? 0 : LOG2(w + 1) - 11)))
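
/* Worked examples (LOG2(n) is the floor of log2(n), e.g. LOG2(4097) == 12):
 *
 *   BUCKET_WIDTH(64)   == 1UL << 12 ==  4096   (one page for small objects)
 *   BUCKET_WIDTH(4096) == 1UL << 13 ==  8192
 *   BUCKET_WIDTH(8192) == 1UL << 14 == 16384
 * */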

/* Bucket metadata sits at the start of the bucket, before the first object's
 * metadata.*/
typedef struct bucket
{
    size_t checksum;
    struct bucket *next;

    uintptr_t limit;
    bucket_obj_t *firstfree; /* Helpful for fast object allocations.*/
    char objs[];
} bucket_t;

/* The minimum data width is one metadata struct, so the smallest object
 * overall (data + metadata) is 2 * sizeof(bucket_obj_t).*/
#define OBJ_SIZE_MIN (sizeof(bucket_obj_t))
/* Data width is always a power of two.*/
#define REALW(n) \
    (n < OBJ_SIZE_MIN ? OBJ_SIZE_MIN : (1UL << (LOG2(n + 1))))
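
/* Worked examples, assuming sizeof(bucket_obj_t) == 16 (typical LP64 target):
 *
 *   REALW(1)  == 16   (below the minimum, clamped to OBJ_SIZE_MIN)
 *   REALW(16) == 16   (1UL << LOG2(17) == 1UL << 4)
 *   REALW(64) == 64   (1UL << LOG2(65) == 1UL << 6)
 * */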

typedef struct heap_cache
{
    bucket_t *head;
    bucket_t *tail;
} heap_cache_t;
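
/* Presumably a cache starts out empty and is filled lazily as buckets are
 * allocated, e.g. (the name g_heap is illustrative only):
 *
 *   static heap_cache_t g_heap = { NULL, NULL };
 * */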

/**Given the heap cache and an object, find the bucket to which the object
 * belongs.
 * Since buckets are always page aligned, we can check each page boundary
 * for the checksum until we find it, then check the limit.
 * Since we also know the first bucket in the cache, we can also check if the
 * passed object is invalid and return NULL.
 * @param heap cache.
 * @param obj to find the bucket for.
 * @return bucket owning obj, or NULL if obj is invalid.*/
bucket_t *__bucket_from_obj(heap_cache_t *heap, bucket_obj_t *obj);
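
/* Sketch of the scan described above (assumes buckets reuse OBJ_CHECKSUM and
 * that heap->head is the lowest-addressed bucket):
 *
 *   uintptr_t page = (uintptr_t)obj & ~((uintptr_t)BUCKET_ALIGN - 1);
 *   while (page >= (uintptr_t)heap->head) {
 *       bucket_t *b = (bucket_t*)page;
 *       if (b->checksum == OBJ_CHECKSUM && (uintptr_t)obj < b->limit)
 *           return b;
 *       page -= BUCKET_ALIGN;
 *   }
 *   return NULL;
 * */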

/**Allocate w bytes into the given heap.
 * If w == 0, returns NULL.
 * If w is not a power of 2, rounds to the nearest pow2.
 * If the heap is corrupted, assert fail.
 * If the program is OOM, errno is assumed to be set and returns (void*)-1.
 * @param heap cache.
 * @param w n-bytes to allocate.
 * @return pointer to allocated memory.*/
void* __malloc_impl(heap_cache_t *heap, size_t w);
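
/* Usage sketch ("cache" is an illustrative, already-declared heap_cache_t):
 *
 *   void *p = __malloc_impl(&cache, 100);   // request rounded to a pow2
 *   if (p == (void*)-1) {
 *       // out of memory: errno is assumed to already be set
 *   }
 * */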

/**Allocate a contiguous array of nmemb members, each w bytes wide.
 * Newly allocated memory is zeroed.
 * If w == 0 || nmemb == 0, returns NULL.
 * If w is not a power of 2, rounds to the nearest pow2.
 * If the heap is corrupted, assert fail.
 * If the program is OOM, errno is assumed to be set and returns (void*)-1.
 * @param heap cache.
 * @param nmemb number of members.
 * @param w number of bytes per member.
 * @return pointer to allocated memory.*/
void* __calloc_impl(heap_cache_t *heap, size_t nmemb, size_t w);
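
/* Usage sketch ("cache" as above): a zeroed array of 16 ints, i.e. 64 bytes:
 *
 *   int *v = __calloc_impl(&cache, 16, sizeof(int));
 * */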

/**Resize the given allocated memory region to fit w.
 * If w is less than the original width, the allocation is left unchanged.
 * If w == 0, frees p.
 * If the heap is corrupted, assert fail.
 * If the program is OOM, errno is assumed to be set and returns (void*)-1.
 * @param heap cache.
 * @param p pointer to reallocate.
 * @param w width to fit p to.
 * @return pointer to memory allocated to fit w.*/
void* __realloc_impl(heap_cache_t *heap, void *p, size_t w);
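
/* Usage sketch ("cache", "p" as above): growing may move the data, shrinking
 * leaves the allocation unchanged:
 *
 *   p = __realloc_impl(&cache, p, 256);
 *   p = __realloc_impl(&cache, p, 64);    // w < original width: no change
 * */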

/**Resize the given allocated contiguous array to fit nmemb members, each w
 * bytes wide.
 * Zeroes any newly added members; existing data is preserved.
 * If w * nmemb is less than the original width, the allocation is left
 * unchanged.
 * If w == 0 || nmemb == 0, frees p.
 * If the heap is corrupted, assert fail.
 * If the program is OOM, errno is assumed to be set and returns (void*)-1.
 * @param heap cache.
 * @param p pointer to reallocate.
 * @param nmemb number of members.
 * @param w width to fit p to.
 * @return pointer to memory allocated to fit w * nmemb.*/
void* __reallocarray_impl(heap_cache_t *heap, void *p, size_t nmemb, size_t w);
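
/* Usage sketch ("cache", "v" as above): growing from 16 to 32 members zeroes
 * the 16 new members and preserves the first 16:
 *
 *   v = __reallocarray_impl(&cache, v, 32, sizeof(int));
 * */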

/**Free a pointer in the given heap.
 * If p == NULL, do nothing.
 * If p does not fall in the range of heap, assert fail.
 * If the heap is corrupted, assert fail.
 * @param heap cache.
 * @param p pointer to free.*/
void  __free_impl(heap_cache_t *heap, void *p);
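
/* Usage sketch: freeing NULL is explicitly a no-op, so callers may free
 * unconditionally:
 *
 *   __free_impl(&cache, p);
 *   __free_impl(&cache, NULL);   // does nothing
 * */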

#endif