// SayoriOS 0.3.3
// pmm.c
9 // Scyther Physical Memory Manager by NDRAEY (c) 2023
10 // for SayoriOS
11 
12 #include <common.h>
13 #include "lib/math.h"
14 #include "mem/pmm.h"
15 #include "lib/string.h"
16 #include "io/ports.h"
17 
// Kernel image boundaries, provided by the linker script.
extern size_t KERNEL_BASE_pos;
extern size_t KERNEL_END_pos;

size_t kernel_start;  // Physical address where the kernel image begins (set in init_paging()).
size_t kernel_end;    // Physical address where the kernel image ends (set in init_paging()).

// Multiboot memory map: total byte length and pointer to the first entry.
size_t mmap_length = 0;
memory_map_entry_t* mentry = 0;

// Total detected physical memory and how much of it is currently allocated, in bytes.
size_t phys_memory_size = 0;
size_t used_phys_memory_size = 0;

// Kernel page directory (created in init_paging() via new_page_directory()).
physical_addr_t* kernel_page_directory = 0;

// Create 128 KB page bitmap for saving data about 4 GB space.
// One bit per 4 KiB page: 1 = used, 0 = free.
uint8_t pages_bitmap[PAGE_BITMAP_SIZE] = {0};

// Becomes true after enable_paging(); switches how page tables are addressed.
bool paging_initialized = false;
37 
42 physical_addr_t phys_alloc_single_page() {
43  if(used_phys_memory_size >= phys_memory_size) {
44  // If no free space, just call the function that handles that situation.
45  qemu_log("No free physical memory. Running emergency scenario...");
46 
47  phys_not_enough_memory();
48  }
49 
50  for(int i = 0; i < PAGE_BITMAP_SIZE; i++) {
51  if(pages_bitmap[i] == 0xff) {
52  // 0xff is eight ones in 8 bit.
53  // 8 ones - all pages in this index is used.
54  continue;
55  } else {
56  // Otherwise, we have some bits cleared - we have free page(s).
57  // Roll over all bits
58  for(int j = 0; j < 8; j++) {
59  // If we have bit cleared, mark it as used, increment memory stat and return address.
60  if(((pages_bitmap[i] >> j) & 1) == 0) {
61  // Page is free
62  pages_bitmap[i] |= (1 << j);
63 
64  used_phys_memory_size += PAGE_SIZE;
65 
66  // Every (8bit) entry can handle (4096 * 8) = 32768 bytes.
67  // Every bit of entry can hold one page (4096 bytes).
68  return (PAGE_SIZE * 8 * i) + (j * PAGE_SIZE);
69  }
70  }
71  }
72  }
73 
74  return 0;
75 }
76 
82 physical_addr_t phys_alloc_multi_pages(size_t count) {
83  if(used_phys_memory_size + (count * PAGE_SIZE) >= phys_memory_size) {
84  qemu_log("No free physical memory. Running emergency scenario...");
85 
86  phys_not_enough_memory();
87  }
88 
89  size_t counter = 0;
90  size_t addr = 0;
91 
92  // They used for saving start indexes of our pages.
93  size_t si, sj = 0;
94 
95  for(int i = 0; i < PAGE_BITMAP_SIZE; i++) {
96  if(pages_bitmap[i] == 0xff) {
97  // 0xff is eight ones in 8 bit.
98  // 8 ones - all pages in this index is used.
99  continue;
100  } else {
101  // Otherwise, we have some bits cleared - we have free page(s).
102  // Roll over all bits
103  for(int j = 0; j < 8; j++) {
104  // Check if page is free
105  if(((pages_bitmap[i] >> j) & 1) == 0) {
106  // If we starting, we need to save an address.
107  if(counter == 0) {
108  si = i;
109  sj = j;
110  addr = (PAGE_SIZE * 8 * i) + (j * PAGE_SIZE);
111  } else if(counter == count) {
112  // If we found `count` free pages in a row, we should mark them as used and return address.
113 
114  // Roll through all entries, starting from indices we preserved.
115  for(; si < PAGE_BITMAP_SIZE; si++) {
116  for(; sj < 8; sj++) {
117  // Mark as used.
118  pages_bitmap[si] |= (1 << sj);
119 
120  // We have no control, so keep loops running until we mark all `count` pages as used.
121  // If we marked all pages, exit the loops.
122  if(!--counter)
123  goto phys_multialloc_end;
124  }
125 
126  sj = 0;
127  }
128 
129  phys_multialloc_end:
130 
131  used_phys_memory_size += PAGE_SIZE * count;
132 
133  return addr;
134  }
135 
136  // We found a free page, so increment a counter
137  counter++;
138  } else {
139  // Oh shit, we have encountered a used page! (Loud scream!)
140  // Okay, just reset the counter and address to start from the beginning.
141 
142  counter = 0;
143  addr = 0; // For sanity
144  }
145  }
146  }
147  }
148 
149  return 0;
150 }
151 
156 void phys_free_single_page(physical_addr_t addr) {
157  if(!addr)
158  return;
159 
160  // Extract our entry position (i) and bit in the entry (j).
161 
162  size_t i = addr / (PAGE_SIZE * 8);
163  size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;
164 
165  // Just clear a nth bit
166  pages_bitmap[i] &= ~(1 << j);
167 
168  used_phys_memory_size -= PAGE_SIZE;
169 }
170 
176 void phys_free_multi_pages(physical_addr_t addr, size_t count) {
177  if(!addr)
178  return;
179 
180  // Extract our entry position (i) and bit in the entry (j).
181 
182  size_t i = addr / (PAGE_SIZE * 8);
183  size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;
184 
185  // Roll over all entries starting from index of our address
186  for(; i < PAGE_BITMAP_SIZE; i++) {
187  for(; j < 8; j++) {
188  // Just clear a nth bit
189  pages_bitmap[i] &= ~(1 << j);
190 
191  // If we freed all pages, just exit the function.
192  if(!--count) {
193  return;
194  }
195  }
196 
197  j = 0;
198  }
199 
200  used_phys_memory_size -= PAGE_SIZE * count;
201 }
202 
203 // Tells if page allocated there
204 bool phys_is_used_page(physical_addr_t addr) {
205  if(!addr)
206  return true;
207 
208  // Extract our entry position (i) and bit in the entry (j).
209 
210  size_t i = addr / (PAGE_SIZE * 8);
211  size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;
212 
213  // Just clear a nth bit
214  return (bool)((pages_bitmap[i] >> j) & 1);
215 }
216 
217 // Marks page.
218 void phys_mark_page_entry(physical_addr_t addr, uint8_t used) {
219  if(!addr)
220  return;
221 
222  // Extract our entry position (i) and bit in the entry (j).
223 
224  size_t i = addr / (PAGE_SIZE * 8);
225  size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;
226 
227  if(used)
228  pages_bitmap[i] |= (1 << j);
229  else
230  pages_bitmap[i] &= ~(1 << j);
231 }
232 
// Creates and prepares a page directory.
uint32_t * new_page_directory() {
    // A page directory is exactly one page (4096 bytes).
    uint32_t* dir = (uint32_t*)phys_alloc_single_page();

    qemu_log("Allocated page directory at: %x", dir);

    // Freshly allocated memory may hold garbage — zero all 1024 entries.
    // NOTE(review): this writes through the physical address, which is only
    // valid before paging is enabled (or under identity mapping) — confirm.
    memset(dir, 0, 4096);

    // Recursive mapping: the last entry points back at the directory itself,
    // making page tables reachable through a fixed virtual window later.
    dir[1023] = (uint32_t)dir | 3;

    qemu_log("Blanked directory.");

    qemu_log("================ Page directory is ready.");

    return dir;
}
274 
// Zeroes every one of the 1024 32-bit entries of a page directory.
void blank_page_directory(uint32_t* pagedir_addr) {
    size_t entry = 0;

    while (entry < 1024) {
        pagedir_addr[entry++] = 0; // Fully blank
    }
}
281 
282 uint32_t* get_page_table_by_vaddr(uint32_t* page_dir, virtual_addr_t vaddr) {
283  if(paging_initialized)
284  return (uint32_t*)((char*)page_directory_start + (PD_INDEX(vaddr) * PAGE_SIZE));
285  else
286  return (uint32_t*)(page_dir[PD_INDEX(vaddr)] & ~0xfff);
287 }
288 
// Rewrites CR3 with its current value, which flushes the TLB
// (non-global entries) so page-table edits take effect.
void reload_cr3() {
    __asm__ volatile("mov %cr3, %eax\n"
                     "mov %eax, %cr3");
}
293 
// Maps a page.
// Note: No need to set PAGE_PRESENT flag, it sets automatically.
// TODO: Rewrite this, this is buggy
//
// Maps one 4 KiB physical page to one virtual address in `page_dir`.
// Low 12 bits of both addresses are discarded (page alignment).
// Allocates and zeroes a new page table on demand when the covering
// directory entry is not present.
void map_single_page(physical_addr_t* page_dir, physical_addr_t physical, virtual_addr_t virtual, uint32_t flags) {
    // Clean flags and some garbage from addresses.
    virtual &= ~0xfff;
    physical &= ~0xfff;

    // Get our Page Directory Index and Page Table Index.
    uint32_t pdi = PD_INDEX(virtual);
    uint32_t pti = PT_INDEX(virtual);

    uint32_t* pt;

    // Check if page table not present.
    if((page_dir[pdi] & 1) == 0) {
        // Allocate a fresh physical page for the new table and install it.
        pt = (uint32_t *)phys_alloc_single_page();

        page_dir[pdi] = (uint32_t)pt | PAGE_WRITEABLE | PAGE_PRESENT;

        if(paging_initialized && page_dir == get_kernel_page_directory()) {
            // Paging is live: the new table must be touched through the
            // recursive-mapping window, not its physical address.
            uint32_t pt_addr = (uint32_t)page_directory_start + (pdi * PAGE_SIZE);

            memset((uint32_t*)pt_addr, 0, PAGE_SIZE);

            pt = (uint32_t*)pt_addr;
        } else if(paging_initialized && page_dir != get_kernel_page_directory()) {
            // Not implemented: editing a non-kernel directory after paging
            // is enabled would need a temporary mapping of its tables.
            qemu_warn("FIXME: Mapping other page directories");
            while(1);
        } else {
            // Pre-paging: physical address is directly usable.
            memset(pt, 0, PAGE_SIZE);
        }
    } else {
        // Just get our page table
        pt = get_page_table_by_vaddr(page_dir, virtual);
    }

    // Finally map our physical page to virtual
    pt[pti] = physical | flags | PAGE_PRESENT;

    // Do our best to take effect: full TLB flush plus a targeted invlpg.
    reload_cr3();
    __asm__ volatile ("invlpg (,%0,)"::"a"(virtual));
}
349 
350 void unmap_single_page(uint32_t* page_dir, virtual_addr_t virtual) {
351  virtual &= ~0xfff;
352 
353  uint32_t* pt;
354 
355  // Check if page table not present.
356  if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
357  return;
358  } else {
359 // qemu_log("Page table exist");
360  pt = get_page_table_by_vaddr(page_dir, virtual);
361  }
362 
363 // qemu_log("Got page table at: %x", pt);
364 // qemu_log("Unmapping: %x", virtual);
365 
366  pt[PT_INDEX(virtual)] = 0;
367 
368  reload_cr3();
369 }
370 
371 uint32_t phys_get_page_data(uint32_t* page_dir, virtual_addr_t virtual) {
372  virtual &= ~0x3ff;
373 
374  uint32_t* pt;
375 
376  // Check if page table not present.
377  if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
378  return 0;
379  } else {
380  pt = get_page_table_by_vaddr(page_dir, virtual);
381  }
382 
383  return pt[PT_INDEX(virtual)];
384 }
385 
386 uint32_t virt2phys(const uint32_t *page_dir, virtual_addr_t virtual) {
387 // virtual &= ~0x3ff;
388  virtual &= ~0xfff;
389 
390  uint32_t* pt;
391 
392  // Check if page table not present.
393  if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
394  return 0;
395  } else {
396  pt = get_page_table_by_vaddr(page_dir, virtual);
397  }
398 
399  return pt[PT_INDEX(virtual)] & ~0x3ff;
400 }
401 
402 void phys_set_flags(uint32_t* page_dir, virtual_addr_t virtual, uint32_t flags) {
403  virtual &= ~0xfff;
404 
405  uint32_t* pt;
406 
407  // Check if page table not present.
408  if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
409  return;
410  } else {
411  pt = get_page_table_by_vaddr(page_dir, virtual);
412  }
413 
414  pt[PT_INDEX(virtual)] = (pt[PT_INDEX(virtual)] & ~0x3ff) | flags | PAGE_PRESENT;
415 }
416 
417 
427 void map_pages(uint32_t* page_dir, physical_addr_t physical, virtual_addr_t virtual, size_t size, uint32_t flags) {
428  physical_addr_t phys = physical;
429  physical_addr_t virt = virtual;
430 
431  virtual_addr_t vend = ALIGN(virt + size, PAGE_SIZE);
432 
433  for(;
434  virt <= vend;
435  phys += PAGE_SIZE,
436  virt += PAGE_SIZE
437  ) {
438  map_single_page(page_dir, phys, virt, flags);
439  }
440 
441  reload_cr3();
442 }
443 
444 void check_memory_map(memory_map_entry_t* mmap_addr, uint32_t length){
445  int i;
446  /* Entries number in memory map structure */
447  mmap_length = length;
448  size_t n = length / sizeof(memory_map_entry_t);
449 
450  /* Set pointer to memory map */
451  mentry = mmap_addr;
452  qemu_log("[PMM] Map:");
453  for (i = 0; i < n; i++){
454  memory_map_entry_t entry = mentry[i];
455 
456  qemu_log("%s [Address: %x | Length: %x] <%d>",
457  (entry.type == 1?"Available":"Reserved"),
458  entry.addr_low, entry.len_low, entry.type);
459 
460  phys_memory_size += entry.len_low;
461  }
462 
463  qemu_log("RAM: %d MB | %d KB | %d B", phys_memory_size/(1024*1024), phys_memory_size/1024, phys_memory_size);
464 }
465 
466 size_t getInstalledRam(){
467  return phys_memory_size;
468 }
469 
470 void mark_reserved_memory_as_used(memory_map_entry_t* mmap_addr, uint32_t length){
471  int i;
472  /* Entries number in memory map structure */
473  mmap_length = length;
474  size_t n = length / sizeof(memory_map_entry_t);
475 
476  /* Set pointer to memory map */
477  mentry = mmap_addr;
478  for (i = 0; i < n; i++){
479  memory_map_entry_t entry = mentry[i];
480 
481  size_t addr = entry.addr_low;
482  size_t length = entry.len_low;
483 
484  if(entry.type != 1) {
485  for(int j = 0; j < length; j += PAGE_SIZE) {
486  phys_mark_page_entry(addr + j, 1); // Mark as used
487  }
488  }
489  }
490 
491  qemu_log("RAM: %d MB | %d KB | %d B", phys_memory_size/(1024*1024), phys_memory_size/1024, phys_memory_size);
492 }
493 
494 uint32_t* get_kernel_page_directory() {
495  if(paging_initialized)
496  return page_directory_virt;
497  else
498  return kernel_page_directory;
499 }
500 
// Boot-time paging setup: reserves the physical pages occupied by the kernel
// and GRUB modules, builds the kernel page directory, identity-maps that
// region, loads CR3, and enables paging. Order-sensitive — do not reorder.
void init_paging() {
    // Last physical address used by GRUB-loaded modules (initrd etc.).
    extern size_t grub_last_module_end;

    // NOTE(review): (bits >> 10) with "%d MB" looks off — 1 Mi bits cover
    // 4096 MB of 4 KiB pages, but this prints 1024. Verify the log math.
    qemu_log("Memory bitmap covers: %d MB", (sizeof(pages_bitmap) * 8) >> 10);

    kernel_start = (size_t)&KERNEL_BASE_pos;
    kernel_end = (size_t)&KERNEL_END_pos;

    qemu_log("MODEND: %x; &MODEND: %x", grub_last_module_end, (size_t)&grub_last_module_end);

    // End of everything that must stay resident: kernel image + modules.
    size_t real_end = (size_t)grub_last_module_end;

    size_t kernel_size = real_end - kernel_start;

    qemu_log("Kernel starts at: %x", kernel_start);
    qemu_log("Kernel ends at: %x (only kernel)", kernel_end);
    qemu_log("Kernel ends at: %x (everything)", real_end);

    qemu_log("Kernel size is: %d (%d kB) (%d MB)", (kernel_end - kernel_start), (kernel_end - kernel_start) >> 10, (kernel_end - kernel_start) >> 20);
    qemu_log("Kernel size (initrd included) is: %d (%d kB) (%d MB)", kernel_size, kernel_size >> 10, kernel_size >> 20);

    kernel_size = ALIGN(kernel_size, PAGE_SIZE);

    // Preallocate our kernel space: the bitmap is empty, so these calls
    // hand out pages 0..real_end in order, marking the kernel region used.
    qemu_log("Allocating %d pages for kernel space...", (real_end / 4096) + 1);

    for(int i = 0; i < (real_end / 4096) + 1; i++) {
        // Note: if allocator returns 0, it's an error.
        // But we don't care, because we're allocating pages for first time here.
        phys_alloc_single_page();
    }

    // Create new page directory for the kernel.
    kernel_page_directory = (physical_addr_t*)new_page_directory();

    qemu_log("New page directory at: %x", kernel_page_directory);

    // Identity-map [0, real_end) so execution continues seamlessly
    // once paging turns on.
    map_pages(kernel_page_directory, 0, 0, ALIGN(real_end, PAGE_SIZE), PAGE_WRITEABLE);

    qemu_log("Max: %x", phys_memory_size);

    load_page_directory((size_t) kernel_page_directory);

    qemu_log("Ok?");

    enable_paging();

    // From here on, page tables are addressed via the recursive mapping.
    paging_initialized = true;

    // Here paging enabled and every memory error will lead to a Page Fault

    // Debug dump: log every non-empty directory entry.
    uint32_t* pd = get_kernel_page_directory();

    for(int i = 0; i < 1024; i++) {
        if(pd[i] != 0)
            qemu_log("[%d]: %x", i, pd[i]);
    }
}
562 
563 
573 void map_pages_overlapping(physical_addr_t* page_directory, size_t physical_start, size_t virtual_start, size_t size, uint32_t flags) {
574  // Explanation: We want to map address 0xd000abcd with size 2345
575  // If we will use map_pages it will map only one page, because addresses gets aligned to PAGE_SIZE
576  // (0xd000abcd -> 0xd000a000), and size too (2345 -> 4096)
577  // So it uses memory from 0xd000abcd to 0xd000b4f6 (2 pages)
578  //
579  // We need to calculate how many pages we need to map
580 // size_t pages_to_map = (size + PAGE_SIZE - 1) / PAGE_SIZE;
581  // And then map them
582 
583  size_t nth1 = virtual_start / PAGE_SIZE;
584  size_t nth2 = (virtual_start + size) / PAGE_SIZE;
585 
586  size_t pages_to_map = (nth2 - nth1) + 1;
587 
588  qemu_log("Range: %x - %x", virtual_start, virtual_start + size);
589 
590  qemu_note("Mapping %u pages to %x", pages_to_map, physical_start);
591  map_pages(page_directory, physical_start, virtual_start, pages_to_map * PAGE_SIZE, flags);
592 }
593 
594 void unmap_pages_overlapping(physical_addr_t* page_directory, size_t virtual, size_t size) {
595 // size_t pages_to_map = (size + PAGE_SIZE - 1) / PAGE_SIZE;
596  virtual &= ~0xfff;
597 
598  size_t nth1 = virtual / PAGE_SIZE;
599  size_t nth2 = (virtual + size) / PAGE_SIZE;
600 
601  size_t pages_to_map = (nth2 - nth1) + 1;
602 
603  for(size_t i = 0; i < pages_to_map; i++) {
604  unmap_single_page(page_directory, virtual + (i * PAGE_SIZE));
605  }
606 }
607 
// Last-resort handler for physical-memory exhaustion: logs the condition
// and halts forever. Never returns.
void phys_not_enough_memory() {
    qemu_log("Not enough memory!");

    while(1);
}
// (Doxygen cross-reference residue, translated from Russian:)
// Core kernel definitions.
// void *memset(void *ptr, char value, size_t num)
//   Fills an array with the given byte value. Definition: string.c:203
// Definition: multiboot.h:102