// SayoriOS 0.3.3
// vmm.c
9 // Charmander - a new virtual memory manager by NDRAEY (c) 2023
10 // for SayoriOS
11 
12 #include "../../include/mem/vmm.h"
13 #include "../../include/mem/pmm.h"
14 #include "io/ports.h"
15 
16 heap_t system_heap;
17 bool vmm_debug = false;
18 
19 //size_t pmm_alloc_and_map(size_t* page_dir, size_t virtual_addr, size_t bytes) {
20 // size_t count = (bytes + 1) / PAGE_SIZE;
21 // size_t pages = phys_alloc_multi_pages(count);
22 //
23 // map_pages(page_dir, pages, virtual_addr, bytes, PAGE_WRITEABLE);
24 //
25 // return pages;
26 //}
27 
28 size_t pmm_alloc_and_map_self(size_t* page_dir, size_t bytes) {
29  size_t count = (bytes + 1) / PAGE_SIZE;
30  size_t pages = phys_alloc_multi_pages(count);
31 
32  map_pages(page_dir, pages, pages, bytes, PAGE_WRITEABLE);
33 
34  return pages;
35 }
36 
37 void vmm_init() {
38  memset(&system_heap, 0, sizeof(heap_t));
39 
40  system_heap.capacity = PAGE_SIZE / sizeof(struct heap_entry);
41 
42  system_heap.allocated_count = 0;
43  system_heap.used_memory = 0;
44 
45  system_heap.start = 0x1000000;
46  system_heap.memory = (struct heap_entry*)pmm_alloc_and_map_self(get_kernel_page_directory(), PAGE_SIZE);
47 
48  memset(system_heap.memory, 0, PAGE_SIZE);
49 
50  qemu_log("CAPACITY: %d", system_heap.capacity);
51  qemu_log("MEMORY AT: %x", (size_t)system_heap.memory);
52 }
53 
54 void heap_dump() {
55  qemu_note("Heap info: %d entries of %d possible", system_heap.allocated_count, system_heap.capacity);
56  qemu_note(" %d bytes of ? bytes used", system_heap.used_memory);
57 
58  for(int i = 0; i < system_heap.allocated_count; i++) {
59  qemu_log("[%d] [%x, %d => %x]",
60  i,
61  system_heap.memory[i].address,
62  system_heap.memory[i].length,
63  system_heap.memory[i].address + system_heap.memory[i].length);
64 
65  if(i < system_heap.allocated_count - 1) {
66  if(system_heap.memory[i].address + system_heap.memory[i].length < system_heap.memory[i + 1].address) {
67  qemu_log("FREE SPACE: %d bytes", system_heap.memory[i + 1].address - (system_heap.memory[i].address + system_heap.memory[i].length));
68  }
69  }
70  }
71 }
72 
73 // TODO: Handle out of memory, implement automatical heap resizing
74 void* alloc_no_map(size_t size, size_t align) {
75  void* mem = 0;
76 
77  if(system_heap.allocated_count == 0) {
78  mem = (void *) system_heap.start;
79 
80  system_heap.memory[0].address = system_heap.start;
81  system_heap.memory[0].length = size;
82 
83  goto ok;
84  }
85 
86  if(system_heap.allocated_count == system_heap.capacity - 1) {
87  qemu_err("TODO: IMPLEMENT HEAP RESIZING!!!!");
88  while(1);
89  }
90 
91  for(int i = 0; i < system_heap.allocated_count; i++) {
92  struct heap_entry cur = system_heap.memory[i];
93  struct heap_entry next = system_heap.memory[i + 1];
94 
95  size_t curend = cur.address + cur.length;
96 
97  if(align)
98  curend = ALIGN(curend, align);
99 
100  if(next.address == 0) {
101  system_heap.memory[i + 1].address = curend;
102  system_heap.memory[i + 1].length = size;
103 
104  mem = (void*)system_heap.memory[i + 1].address;
105 
106  goto ok;
107  } else if(curend + size <= next.address) {
108  // Ok!
109 
110  for(size_t j = system_heap.allocated_count; j > i; j--) {
111  system_heap.memory[j] = system_heap.memory[j - 1];
112  }
113 
114  mem = (void*)(curend);
115  system_heap.memory[i + 1].address = curend;
116  system_heap.memory[i + 1].length = size;
117 
118  goto ok;
119  }
120  }
121 
122  ok:
123 
124  system_heap.allocated_count++;
125  system_heap.used_memory += size;
126 
127  // end:
128 
129  return mem;
130 }
131 
132 void free_no_map(void* ptr) {
133  if(!ptr)
134  return;
135 
136  size_t i = 0;
137  bool found = false;
138 
139  for(; i < system_heap.allocated_count; i++) {
140  if(system_heap.memory[i].address == (size_t)ptr) {
141  found = true;
142  break;
143  }
144  }
145 
146  if(!found)
147  return;
148 
149  system_heap.used_memory -= system_heap.memory[i].length;
150 
151  for (; i < system_heap.allocated_count - 1; i++) {
152  system_heap.memory[i] = system_heap.memory[i + 1];
153  }
154 
155  system_heap.memory[i].address = 0;
156  system_heap.memory[i].length = 0;
157 
158  system_heap.allocated_count--;
159 }
160 
/**
 * Allocates `size` bytes from the kernel heap with optional alignment.
 *
 * Reserves address space via alloc_no_map(), then walks the covered
 * pages and maps a fresh physical page for each one that is not yet
 * present in the kernel page directory.
 *
 * @param size  Number of bytes to allocate.
 * @param align Alignment passed through to alloc_no_map() (0 = none).
 * @return Virtual address of the allocation, or 0 on failure.
 */
void* kmalloc_common(size_t size, size_t align) {
    // Reserve address space in the heap entry table first.
    void* allocated = alloc_no_map(size, align);

    if(!allocated) {
        return 0;
    }

    // Start from the page containing the allocation's first byte.
    size_t reg_addr = (size_t) allocated & ~0xfff;

    // NOTE(review): `<=` combined with ALIGN(size, 4096) maps up to one
    // page beyond the strictly required amount. This looks intentional
    // (the allocation start is not page-aligned, so the tail may spill
    // into an extra page) — confirm before tightening the bound.
    for(size_t i = 0; i <= ALIGN(size, 4096); i += PAGE_SIZE) {
        size_t region = phys_get_page_data(get_kernel_page_directory(),
                reg_addr); // is allocated region there?

        if (!region) {
            if(vmm_debug) {
                qemu_warn("Region is not yet mapped: %x", reg_addr);
            }

            // Back the virtual page with a new physical page.
            size_t page = phys_alloc_single_page();

            if(vmm_debug) {
                qemu_log("Obtained new page: %x", page);
            }

            map_single_page(get_kernel_page_directory(),
                    page,
                    reg_addr,
                    PAGE_WRITEABLE);

            if(vmm_debug) {
                qemu_ok("Mapped!");
            }
        } else {
            if(vmm_debug) {
                qemu_warn("Already mapped: %x (Size: %d)", reg_addr, size);
            }
        }

        reg_addr += PAGE_SIZE;
    }

    if(vmm_debug) {
        qemu_ok("From %x to %x, here you are!", (size_t)allocated, (size_t)(allocated + size));
    }

    return allocated;
}
208 
209 bool vmm_is_page_used_by_entries(size_t address) {
210  for(size_t i = 0; i < system_heap.allocated_count; i++) {
211  size_t start = system_heap.memory[i].address & ~0xfff;
212  size_t end = ALIGN(system_heap.memory[i].address + system_heap.memory[i].length, PAGE_SIZE);
213 
214 // qemu_log("[%x => %x] %x => %x", system_heap.memory[i].address, system_heap.memory[i].address + system_heap.memory[i].length, start, end);
215 
216  if(address >= start && address < end) {
217  return true;
218  }
219  }
220 
221  return false;
222 }
223 
224 struct heap_entry heap_get_block(size_t address) {
225  for(int i = 0; i < system_heap.allocated_count; i++) {
226  if(system_heap.memory[i].address == address) {
227  return system_heap.memory[i];
228  }
229  }
230 
231  return (struct heap_entry){};
232 }
233 
234 // NOTE: Returns nullptr if block does not exist
235 struct heap_entry* heap_get_block_ref(size_t address) {
236  for(int i = 0; i < system_heap.allocated_count; i++) {
237  if(system_heap.memory[i].address == address) {
238  return &(system_heap.memory[i]);
239  }
240  }
241 
242  return 0;
243 }
244 
245 // NOTE: Returns 0xFFFFFFFF if not exist
246 size_t heap_get_block_idx(size_t address) {
247  for(size_t i = 0; i < system_heap.allocated_count; i++) {
248  if(system_heap.memory[i].address == address) {
249  return i;
250  }
251  }
252 
253  return 0xFFFFFFFF;
254 }
255 
/**
 * Frees a heap allocation and releases its backing physical pages.
 *
 * A page is only unmapped when no remaining heap entry still overlaps
 * it — allocations are not page-aligned, so neighbouring blocks can
 * share a page with the one being freed.
 *
 * @param ptr Address previously returned by the allocator; NULL is a no-op.
 */
void kfree(void* ptr) {
    if(!ptr)
        return;

    // Copy the entry first — it is removed from the table below.
    struct heap_entry block = heap_get_block((size_t)ptr);

    if(vmm_debug)
        qemu_printf("Freeing %x\n", (size_t)ptr);

    if(!block.address) {
        qemu_warn("No block!");
        return;
    }


    // Drop the bookkeeping entry BEFORE scanning for page overlaps, so
    // the block being freed does not count as a user of its own pages.
    free_no_map(ptr);

    // Walk the freed region page by page; release every page that no
    // other live allocation touches.
    for(size_t i = 0; i < block.length; i += PAGE_SIZE) {
        if(!vmm_is_page_used_by_entries(block.address + i)) {
            size_t phys_addr = phys_get_page_data(get_kernel_page_directory(), block.address + i) & ~0xfff;

            if(vmm_debug)
                qemu_warn("Unmapping %x => %x", block.address + i, phys_addr);

            unmap_single_page(get_kernel_page_directory(), block.address + i);

            phys_free_single_page(phys_addr);
        }
    }

// qemu_ok("OK!");
}
288 
289 void* krealloc(void* ptr, size_t memory_size) {
290  if(!ptr)
291  return 0;
292 
293  struct heap_entry* block = heap_get_block_ref((size_t) ptr);
294 
295  if(!block)
296  return 0;
297 
298 // qemu_warn("ORIGINAL BLOCK: %x, %d", block->address, block->length);
299 
300  if(memory_size > block->length) { // Expand
301 // qemu_warn("EXPANDING FROM %d to %d", block->length, memory_size);
302 
303  size_t index = heap_get_block_idx((size_t) ptr);
304 
305  if(index == system_heap.allocated_count - 1) { // Last block?
306 // qemu_log("LAST BLOCK!");
307 
308  size_t reg_addr = block->address & ~0xfff;
309 
310  for(int addr_offset = 0; addr_offset <= ALIGN(memory_size, 4096); addr_offset += PAGE_SIZE) {
311  size_t region = phys_get_page_data(get_kernel_page_directory(),
312  reg_addr); // is allocated region there?
313 
314  if (!region) {
315 // qemu_warn("Region is not yet mapped: %x", reg_addr);
316 
317  size_t page = phys_alloc_single_page();
318 // qemu_log("Obtained new page: %x", page);
319 
320  map_single_page(get_kernel_page_directory(),
321  page,
322  reg_addr,
323  PAGE_WRITEABLE);
324 
325 // qemu_ok("Mapped!");
326  }/* else {
327  qemu_warn("Already mapped: %x", reg_addr);
328  }*/
329 
330  reg_addr += PAGE_SIZE;
331  }
332 
333  system_heap.used_memory += memory_size - block->length;
334 
335  block->length = memory_size;
336  } else {
337 // qemu_err("CAN USE NEXT!");
338 
339  struct heap_entry next = system_heap.memory[index + 1];
340 
341  size_t willend = block->address + memory_size;
342 
343  if(willend < next.address) {
344 // qemu_log("THERE'S FREE SPACE!");
345 
346  size_t reg_addr = block->address & ~0xfff;
347 
348  for(int addr_offset = 0; addr_offset <= ALIGN(memory_size, 4096); addr_offset += PAGE_SIZE) {
349  size_t region = phys_get_page_data(get_kernel_page_directory(),
350  reg_addr); // is allocated region there?
351 
352  if (!region) {
353 // qemu_warn("Region is not yet mapped: %x", reg_addr);
354 
355  size_t page = phys_alloc_single_page();
356 // qemu_log("Obtained new page: %x", page);
357 
358  map_single_page(get_kernel_page_directory(),
359  page,
360  reg_addr,
361  PAGE_WRITEABLE);
362 
363 // qemu_ok("Mapped!");
364  }/* else {
365  qemu_warn("Already mapped: %x", reg_addr);
366  }*/
367 
368  reg_addr += PAGE_SIZE;
369  }
370 
371  system_heap.used_memory += memory_size - block->length;
372 
373  block->length = memory_size;
374  } else {
375 // qemu_err("No space between blocks! :("); // IT'S NORMAL
376 
377  void* new_block = kmalloc(memory_size);
378 
379  memcpy(new_block, (const void *) block->address, block->length);
380 
381  kfree(ptr);
382 
383  return new_block;
384  }
385 
386 // qemu_ok("Next is %x, %d", next.address, next.length);
387  }
388  } else if(memory_size < block->length) { // Shrink
389  qemu_warn("SHRINKING FROM %d to %d", block->length, memory_size);
390 
391  system_heap.used_memory -= block->length - memory_size;
392 
393  block->length = memory_size;
394  }
395 
396  return (void *) block->address;
397 }
398 
/**
 * Creates a copy of the kernel page directory together with fresh
 * copies of every present kernel page table.
 *
 * @param virts_out Receives the virtual address of each copied page
 *                  table, indexed by page-directory slot (untouched
 *                  where the kernel directory has no table).
 * @return Virtual address of the new page directory.
 *
 * NOTE(review): both loops stop at 1023, skipping the last directory
 * slot — it is overwritten below with the directory's own physical
 * address (recursive mapping), presumably why it is excluded.
 */
void* clone_kernel_page_directory(size_t virts_out[1024]) {
    uint32_t* page_dir = kmalloc_common(PAGE_SIZE, PAGE_SIZE);
    memset(page_dir, 0, PAGE_SIZE);

    // Physical address of the new directory (needed for the recursive slot).
    uint32_t physaddr = virt2phys(get_kernel_page_directory(), (virtual_addr_t) page_dir);

    const uint32_t* kern_dir = get_kernel_page_directory();
    // assumes page_directory_start is a linear window where the kernel's
    // page tables are visible, one page per directory slot — TODO confirm
    const uint32_t linaddr = (const uint32_t)(page_directory_start);

// uint32_t* addresses[1024] = {0};

    // Pass 1: allocate a page-aligned buffer for every present table.
    for(int i = 0; i < 1023; i++) {
        if (kern_dir[i]) {
            uint32_t *page_table = kmalloc_common(PAGE_SIZE, PAGE_SIZE);

            virts_out[i] = (size_t)page_table;
        }
    }

    // Pass 2: copy each table's contents and wire it into the new directory.
    for(int i = 0; i < 1023; i++) {
        if (kern_dir[i]) {
            uint32_t* page_table = (uint32_t*)virts_out[i];
            uint32_t physaddr_pt = virt2phys(kern_dir, (virtual_addr_t) page_table);

            qemu_log("Copying from %x to %x", linaddr + (i * PAGE_SIZE), (size_t)page_table);

            memcpy(page_table, (void*)(linaddr + (i * PAGE_SIZE)), PAGE_SIZE);

            // Clear per-page status bits so the copy starts clean.
            for(int j = 0; j < 1024; j++) {
                page_table[j] = (page_table[j] & ~(PAGE_DIRTY | PAGE_ACCESSED));
            }

            // 3 = present | writable flags on the directory entry.
            page_dir[i] = physaddr_pt | 3;
        }
    }

    // Recursive mapping: the last slot points at the directory itself.
    page_dir[1023] = physaddr | 3;

    for(int i = 0; i < 1024; i++)
        if(page_dir[i])
            qemu_log("[%d] %x = %x", i, kern_dir[i], page_dir[i]);

    qemu_log("Page directory at: V%x (P%x); Here you are!", (size_t)page_dir, physaddr);

    return page_dir;
}
449 
450 void vmm_debug_switch(bool enable) {
451  vmm_debug = enable;
452 }
/* Doxygen cross-reference residue (extraction artifact, not source code):
 *   void *memset(void *ptr, char value, size_t num)
 *       Fills a buffer with the given byte value. Defined in string.c:203.
 *   void *memcpy(void *restrict destination, const void *restrict source, size_t n)
 *       Copies non-overlapping buffers using SSE. Defined in string.c:173.
 *   void qemu_printf(const char *text, ...)
 *       Prints debug output to QEMU via COM1. Defined in ports.c:149.
 *   Struct definitions referenced: vmm.h:14, vmm.h:19.
 */