12 #include "../../include/mem/vmm.h"
13 #include "../../include/mem/pmm.h"
// Global toggle for verbose VMM logging (flipped at runtime by vmm_debug_switch()).
17 bool vmm_debug =
false;
28 size_t pmm_alloc_and_map_self(
size_t* page_dir,
size_t bytes) {
29 size_t count = (bytes + 1) / PAGE_SIZE;
30 size_t pages = phys_alloc_multi_pages(count);
32 map_pages(page_dir, pages, pages, bytes, PAGE_WRITEABLE);
// NOTE(review): extraction artifact — the function headers that enclose the
// heap-initialisation code below (original lines ~35-48, likely an init
// routine) and the heap-dump code (original lines ~53-67, likely a debug dump
// routine) are missing from this chunk, along with several interior lines and
// closing braces.  Code is left byte-identical; only comments were added.

// --- heap initialisation fragment ---
// The allocation table is one page of heap_entry records, so capacity is
// however many records fit in a single page.
40 system_heap.capacity = PAGE_SIZE /
sizeof(
struct heap_entry);
42 system_heap.allocated_count = 0;
43 system_heap.used_memory = 0;
// Heap payload region starts at a fixed virtual address (16 MiB).
45 system_heap.start = 0x1000000;
// Back the entry table itself with a freshly allocated, identity-mapped page.
46 system_heap.memory = (
struct heap_entry*)pmm_alloc_and_map_self(get_kernel_page_directory(), PAGE_SIZE);
// Zero the table so `.address == 0` reliably marks an unused slot.
48 memset(system_heap.memory, 0, PAGE_SIZE);
50 qemu_log(
"CAPACITY: %d", system_heap.capacity);
51 qemu_log(
"MEMORY AT: %x", (
size_t)system_heap.memory);

// --- heap dump fragment ---
// Walks every live entry, printing [start, length => end] and the free gap
// between consecutive entries.
55 qemu_note(
"Heap info: %d entries of %d possible", system_heap.allocated_count, system_heap.capacity);
56 qemu_note(
" %d bytes of ? bytes used", system_heap.used_memory);
58 for(
int i = 0; i < system_heap.allocated_count; i++) {
59 qemu_log(
"[%d] [%x, %d => %x]",
// NOTE(review): the format string has 4 conversions but only 3 arguments are
// visible here — an `i` argument was probably lost in extraction; verify.
61 system_heap.memory[i].address,
62 system_heap.memory[i].length,
63 system_heap.memory[i].address + system_heap.memory[i].length);
// Gap between this entry's end and the next entry's start is free space.
65 if(i < system_heap.allocated_count - 1) {
66 if(system_heap.memory[i].address + system_heap.memory[i].length < system_heap.memory[i + 1].address) {
67 qemu_log(
"FREE SPACE: %d bytes", system_heap.memory[i + 1].address - (system_heap.memory[i].address + system_heap.memory[i].length));
// First-fit allocator over the sorted heap-entry table.  Reserves `size`
// bytes (aligned to `align`) in the heap's virtual range WITHOUT mapping any
// physical pages — callers (kmalloc_common) map pages afterwards.
// NOTE(review): extraction gaps — the declaration of `mem`, the declaration
// of `cur` (clearly `system_heap.memory[i]`), several closing braces and the
// final `return mem;` are missing from this chunk.  Code left byte-identical.
74 void* alloc_no_map(
size_t size,
size_t align) {
// Empty heap: hand out the very start of the heap region as entry 0.
// NOTE(review): this path does not apply `align` to system_heap.start —
// fine only if the start address already satisfies every caller's alignment
// (0x1000000 is page-aligned); confirm against callers.
77 if(system_heap.allocated_count == 0) {
78 mem = (
void *) system_heap.start;
80 system_heap.memory[0].address = system_heap.start;
81 system_heap.memory[0].length = size;
// Table full (last slot is kept as a sentinel): resizing is not implemented.
86 if(system_heap.allocated_count == system_heap.capacity - 1) {
87 qemu_err(
"TODO: IMPLEMENT HEAP RESIZING!!!!");
// Scan adjacent entry pairs looking for a gap large enough for the request.
91 for(
int i = 0; i < system_heap.allocated_count; i++) {
93 struct heap_entry next = system_heap.memory[i + 1];
// Candidate placement: immediately after the current entry, aligned up.
95 size_t curend = cur.address + cur.length;
98 curend = ALIGN(curend, align);
// `address == 0` marks the unused slot past the last entry: append there.
100 if(next.address == 0) {
101 system_heap.memory[i + 1].address = curend;
102 system_heap.memory[i + 1].length = size;
104 mem = (
void*)system_heap.memory[i + 1].address;
107 }
// Otherwise the request fits in the hole between cur and next: shift all
// later entries up one slot to keep the table sorted, then insert at i+1.
else if(curend + size <= next.address) {
110 for(
size_t j = system_heap.allocated_count; j > i; j--) {
111 system_heap.memory[j] = system_heap.memory[j - 1];
114 mem = (
void*)(curend);
115 system_heap.memory[i + 1].address = curend;
116 system_heap.memory[i + 1].length = size;
// Bookkeeping shared by all successful paths.
124 system_heap.allocated_count++;
125 system_heap.used_memory += size;
// Remove the heap entry whose start address equals `ptr` (no unmapping here —
// kfree handles physical pages).  Keeps the table sorted by compacting all
// later entries down one slot.
// NOTE(review): the declaration/initialisation of `i` and some closing braces
// are missing from this chunk (extraction artifact).  Code left byte-identical.
132 void free_no_map(
void* ptr) {
// Find the entry matching this exact start address.
139 for(; i < system_heap.allocated_count; i++) {
140 if(system_heap.memory[i].address == (
size_t)ptr) {
149 system_heap.used_memory -= system_heap.memory[i].length;
// Shift every later entry down to fill the hole...
151 for (; i < system_heap.allocated_count - 1; i++) {
152 system_heap.memory[i] = system_heap.memory[i + 1];
// ...and clear the now-duplicated last slot so it reads as unused.
155 system_heap.memory[i].address = 0;
156 system_heap.memory[i].length = 0;
158 system_heap.allocated_count--;
// Kernel allocation core: reserve a virtual range via alloc_no_map(), then
// make sure every page spanned by the range is backed by a physical frame in
// the kernel page directory, allocating and mapping frames on demand.
// NOTE(review): several lines (null-check of `allocated`, trailing arguments
// of phys_get_page_data/map_single_page, the "already mapped" condition, the
// final `return allocated;`) are missing from this chunk (extraction
// artifact).  Code left byte-identical.
161 void* kmalloc_common(
size_t size,
size_t align) {
162 void* allocated = alloc_no_map(size, align);
// Page-align the start of the region we must back with frames.
168 size_t reg_addr = (size_t) allocated & ~0xfff;
// NOTE(review): `<=` walks one step past ALIGN(size, 4096), i.e. one extra
// page — possibly deliberate to cover a non-page-aligned `allocated` whose
// tail crosses a page boundary; confirm this is not an off-by-one.
170 for(
size_t i = 0; i <= ALIGN(size, 4096); i += PAGE_SIZE) {
// Look up the existing physical mapping (if any) for this page.
171 size_t region = phys_get_page_data(get_kernel_page_directory(),
176 qemu_warn(
"Region is not yet mapped: %x", reg_addr);
// No frame yet: grab one and map it writable.
179 size_t page = phys_alloc_single_page();
182 qemu_log(
"Obtained new page: %x", page);
185 map_single_page(get_kernel_page_directory(),
195 qemu_warn(
"Already mapped: %x (Size: %d)", reg_addr, size);
199 reg_addr += PAGE_SIZE;
203 qemu_ok(
"From %x to %x, here you are!", (
size_t)allocated, (
size_t)(allocated + size));
// Report whether the page at `address` (page-aligned) overlaps any live heap
// entry — used by kfree to avoid unmapping pages still shared with other
// allocations.
// NOTE(review): the `return true;` inside the hit branch and the trailing
// `return false;` are missing from this chunk (extraction artifact).
209 bool vmm_is_page_used_by_entries(
size_t address) {
210 for(
size_t i = 0; i < system_heap.allocated_count; i++) {
// Expand each entry to the full page range it touches.
211 size_t start = system_heap.memory[i].address & ~0xfff;
212 size_t end = ALIGN(system_heap.memory[i].address + system_heap.memory[i].length, PAGE_SIZE);
216 if(address >= start && address < end) {
// Linear search for the heap entry whose start address equals `address`,
// returned by value.
// NOTE(review): the not-found fallback return (presumably a zeroed entry) is
// missing from this chunk (extraction artifact).
224 struct heap_entry heap_get_block(size_t address) {
225 for(
int i = 0; i < system_heap.allocated_count; i++) {
226 if(system_heap.memory[i].address == address) {
227 return system_heap.memory[i];
// Same lookup as heap_get_block(), but returns a pointer into the live table
// so callers (krealloc) can mutate the entry in place.
// NOTE(review): the not-found fallback return (presumably NULL) is missing
// from this chunk (extraction artifact).
235 struct heap_entry* heap_get_block_ref(
size_t address) {
236 for(
int i = 0; i < system_heap.allocated_count; i++) {
237 if(system_heap.memory[i].address == address) {
238 return &(system_heap.memory[i]);
// Return the table index of the entry starting at `address`.
// NOTE(review): both the hit-path `return i;` and the not-found sentinel
// return are missing from this chunk (extraction artifact).
246 size_t heap_get_block_idx(
size_t address) {
247 for(
size_t i = 0; i < system_heap.allocated_count; i++) {
248 if(system_heap.memory[i].address == address) {
// Free a kmalloc'd block: for every page the block spans, unmap and release
// the backing physical frame — but only if no other live heap entry still
// uses that page.
// NOTE(review): the `block` lookup (heap_get_block), the early-return on "no
// block", the final free_no_map(ptr) call and closing braces are missing from
// this chunk (extraction artifact).  Code left byte-identical.
256 void kfree(
void* ptr) {
266 qemu_warn(
"No block!");
// Walk the block one page at a time.
273 for(
size_t i = 0; i <
block.length; i += PAGE_SIZE) {
// Skip pages shared with a neighbouring allocation.
274 if(!vmm_is_page_used_by_entries(
block.address + i)) {
// Resolve the virtual page back to its physical frame before unmapping.
275 size_t phys_addr = phys_get_page_data(get_kernel_page_directory(),
block.address + i) & ~0xfff;
278 qemu_warn(
"Unmapping %x => %x",
block.address + i, phys_addr);
280 unmap_single_page(get_kernel_page_directory(),
block.address + i);
282 phys_free_single_page(phys_addr);
// Resize a kmalloc'd block.  Strategy visible in this chunk:
//   grow, block is last entry            -> grow in place, map new pages
//   grow, fits in gap before next entry  -> grow in place, map new pages
//   grow, doesn't fit                    -> kmalloc a new block (copy/free
//                                           path lost in extraction)
//   shrink                               -> just trim the entry length
// NOTE(review): many lines are missing (the `block` = heap_get_block_ref()
// lookup, null checks, trailing arguments of phys_get_page_data /
// map_single_page, the copy-and-free of the relocation path, returns and
// closing braces).  Code left byte-identical.  The two grow-in-place page-
// mapping loops are duplicates of each other and of kmalloc_common's loop —
// a candidate for a shared helper once the file is restored.
289 void* krealloc(
void* ptr,
size_t memory_size) {
300 if(memory_size >
block->length) {
303 size_t index = heap_get_block_idx((
size_t) ptr);
// Last entry: nothing above us, so we can always extend in place.
305 if(index == system_heap.allocated_count - 1) {
308 size_t reg_addr =
block->address & ~0xfff;
// Ensure every page of the enlarged range is backed (same pattern —
// and same `<=` bound — as kmalloc_common; see note there).
310 for(
int addr_offset = 0; addr_offset <= ALIGN(memory_size, 4096); addr_offset += PAGE_SIZE) {
311 size_t region = phys_get_page_data(get_kernel_page_directory(),
317 size_t page = phys_alloc_single_page();
320 map_single_page(get_kernel_page_directory(),
330 reg_addr += PAGE_SIZE;
// Account only for the delta, then commit the new length.
333 system_heap.used_memory += memory_size -
block->length;
335 block->length = memory_size;
// Not the last entry: check the gap up to the next entry.
339 struct heap_entry next = system_heap.memory[index + 1];
341 size_t willend =
block->address + memory_size;
343 if(willend < next.address) {
// Fits in the gap: same grow-in-place mapping loop as above.
346 size_t reg_addr =
block->address & ~0xfff;
348 for(
int addr_offset = 0; addr_offset <= ALIGN(memory_size, 4096); addr_offset += PAGE_SIZE) {
349 size_t region = phys_get_page_data(get_kernel_page_directory(),
355 size_t page = phys_alloc_single_page();
358 map_single_page(get_kernel_page_directory(),
368 reg_addr += PAGE_SIZE;
371 system_heap.used_memory += memory_size -
block->length;
373 block->length = memory_size;
// Doesn't fit: relocate (copy + free presumably followed; lost in extraction).
377 void* new_block = kmalloc(memory_size);
388 }
// Shrinking never needs new pages; just trim the bookkeeping.
else if(memory_size < block->length) {
389 qemu_warn(
"SHRINKING FROM %d to %d",
block->length, memory_size);
391 system_heap.used_memory -=
block->length - memory_size;
393 block->length = memory_size;
396 return (
void *)
block->address;
// Deep-copy the kernel page directory: allocate a new directory plus 1023
// page tables, copy the kernel's table contents into them, and wire slot 1023
// as the directory's recursive self-mapping.  Virtual addresses of the new
// tables are reported through `virts_out` so the caller can free them later.
// NOTE(review): returns and some closing braces are missing from this chunk
// (extraction artifact).  Code left byte-identical.
403 void* clone_kernel_page_directory(
size_t virts_out[1024]) {
// The directory itself must be page-sized and page-aligned.
404 uint32_t* page_dir = kmalloc_common(PAGE_SIZE, PAGE_SIZE);
405 memset(page_dir, 0, PAGE_SIZE);
// Physical address of the new directory — needed for the self-map and CR3.
407 uint32_t physaddr = virt2phys(get_kernel_page_directory(), (virtual_addr_t) page_dir);
409 const uint32_t* kern_dir = get_kernel_page_directory();
// Base virtual address where the kernel's page tables are linearly visible
// (presumably via the kernel's own recursive mapping — confirm).
410 const uint32_t linaddr = (
const uint32_t)(page_directory_start);
// First pass: allocate all 1023 tables (slot 1023 is reserved for the
// recursive self-mapping, hence < 1023 and not < 1024).
414 for(
int i = 0; i < 1023; i++) {
416 uint32_t *page_table = kmalloc_common(PAGE_SIZE, PAGE_SIZE);
418 virts_out[i] = (size_t)page_table;
// Second pass: fill each table from the kernel's copy and point the new
// directory at it.
422 for(
int i = 0; i < 1023; i++) {
424 uint32_t* page_table = (uint32_t*)virts_out[i];
425 uint32_t physaddr_pt = virt2phys(kern_dir, (virtual_addr_t) page_table);
427 qemu_log(
"Copying from %x to %x", linaddr + (i * PAGE_SIZE), (
size_t)page_table);
429 memcpy(page_table, (
void*)(linaddr + (i * PAGE_SIZE)), PAGE_SIZE);
// Start the clone with clean dirty/accessed bits.
431 for(
int j = 0; j < 1024; j++) {
432 page_table[j] = (page_table[j] & ~(PAGE_DIRTY | PAGE_ACCESSED));
// `| 3` = present + writable.
435 page_dir[i] = physaddr_pt | 3;
// Recursive self-mapping: the directory maps itself into the top slot.
439 page_dir[1023] = physaddr | 3;
// Debug dump comparing the kernel directory with the clone.
441 for(
int i = 0; i < 1024; i++)
443 qemu_log(
"[%d] %x = %x", i, kern_dir[i], page_dir[i]);
445 qemu_log(
"Page directory at: V%x (P%x); Here you are!", (
size_t)page_dir, physaddr);
// Enable/disable verbose VMM logging.
// NOTE(review): the body (presumably `vmm_debug = enable;`) is missing from
// this chunk (extraction artifact).
450 void vmm_debug_switch(
bool enable) {
void * memset(void *ptr, char value, size_t num)
Fills a memory area with the specified byte value.
void * memcpy(void *restrict destination, const void *restrict source, size_t n)
Copies non-overlapping memory regions using SSE.
void qemu_printf(const char *text,...)
Prints formatted debug output to QEMU via the COM1 serial port.