#include "lib/string.h"

extern size_t KERNEL_BASE_pos;
extern size_t KERNEL_END_pos;

size_t mmap_length = 0;

size_t phys_memory_size = 0;
size_t used_phys_memory_size = 0;

physical_addr_t* kernel_page_directory = 0;

uint8_t pages_bitmap[PAGE_BITMAP_SIZE] = {0};
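// Each bit of pages_bitmap tracks one physical frame: bit j of byte i stands
// for the frame at (i * 8 + j) * PAGE_SIZE, so a single byte accounts for
// 8 * PAGE_SIZE bytes of physical memory.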
bool paging_initialized = false;
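// First-fit allocation of a single physical frame: skip bytes whose eight
// pages are all taken, claim the first clear bit, and return the physical
// address that bit represents.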
physical_addr_t phys_alloc_single_page() {
    if(used_phys_memory_size >= phys_memory_size) {
        qemu_log("No free physical memory. Running emergency scenario...");
        phys_not_enough_memory();
    }

    for(int i = 0; i < PAGE_BITMAP_SIZE; i++) {
        if(pages_bitmap[i] == 0xff) {
            continue;  // all 8 pages covered by this byte are already in use
        }

        for(int j = 0; j < 8; j++) {
            if(((pages_bitmap[i] >> j) & 1) == 0) {
                pages_bitmap[i] |= (1 << j);

                used_phys_memory_size += PAGE_SIZE;

                return (PAGE_SIZE * 8 * i) + (j * PAGE_SIZE);
            }
        }
    }

    return 0;
}
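// Same bitmap scan, but for `count` consecutive free frames: the start of the
// current run is remembered in `addr`, and the run is only committed to the
// bitmap once it is long enough.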
physical_addr_t phys_alloc_multi_pages(size_t count) {
    if(used_phys_memory_size + (count * PAGE_SIZE) >= phys_memory_size) {
        qemu_log("No free physical memory. Running emergency scenario...");
        phys_not_enough_memory();
    }

    physical_addr_t addr = 0;
    size_t counter = 0;

    for(int i = 0; i < PAGE_BITMAP_SIZE; i++) {
        if(pages_bitmap[i] == 0xff) {
            counter = 0;  // a fully used byte breaks any run of free pages
            continue;
        }

        for(int j = 0; j < 8; j++) {
            if(((pages_bitmap[i] >> j) & 1) == 0) {
                counter++;
                if(counter == 1) {
                    addr = (PAGE_SIZE * 8 * i) + (j * PAGE_SIZE);  // start of the free run
                } else if(counter == count) {
                    // Enough contiguous pages: mark the whole run as used.
                    size_t si = addr / (PAGE_SIZE * 8);
                    size_t sj = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;
                    size_t marked = 0;
                    for(; si < PAGE_BITMAP_SIZE; si++) {
                        for(; sj < 8; sj++) {
                            pages_bitmap[si] |= (1 << sj);
                            if(++marked == count)
                                goto phys_multialloc_end;
                        }
                        sj = 0;
                    }
                }
            } else {
                counter = 0;  // the run is interrupted by a used page
            }
        }
    }

    return 0;

phys_multialloc_end:
    used_phys_memory_size += PAGE_SIZE * count;
    return addr;
}
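// The helpers below recover a frame's bitmap position from its address:
// byte index i = addr / (PAGE_SIZE * 8) and bit index j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE,
// the inverse of the address calculation used by the allocators above.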
void phys_free_single_page(physical_addr_t addr) {
    size_t i = addr / (PAGE_SIZE * 8);
    size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;

    pages_bitmap[i] &= ~(1 << j);

    used_phys_memory_size -= PAGE_SIZE;
}
void phys_free_multi_pages(physical_addr_t addr, size_t count) {
    size_t i = addr / (PAGE_SIZE * 8);
    size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;
    size_t freed = 0;
    for(; i < PAGE_BITMAP_SIZE && freed < count; i++) {
        for(; j < 8 && freed < count; j++) {
            pages_bitmap[i] &= ~(1 << j);
            freed++;
        }
        j = 0;
    }
    used_phys_memory_size -= PAGE_SIZE * count;
}
bool phys_is_used_page(physical_addr_t addr) {
    size_t i = addr / (PAGE_SIZE * 8);
    size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;

    return (bool)((pages_bitmap[i] >> j) & 1);
}
void phys_mark_page_entry(physical_addr_t addr, uint8_t used) {
    size_t i = addr / (PAGE_SIZE * 8);
    size_t j = (addr % (PAGE_SIZE * 8)) / PAGE_SIZE;

    if(used)
        pages_bitmap[i] |= (1 << j);
    else
        pages_bitmap[i] &= ~(1 << j);
}
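// new_page_directory() relies on the recursive-mapping trick: the last
// directory entry (1023) points at the directory itself with PRESENT and
// WRITEABLE set (| 3), so once the directory is loaded into CR3 every page
// table becomes reachable through a fixed virtual window (page_directory_start)
// instead of through its physical address.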
uint32_t* new_page_directory() {
    uint32_t* dir = (uint32_t*)phys_alloc_single_page();

    qemu_log("Allocated page directory at: %x", dir);

    // Clear all 1024 entries before installing the recursive mapping.
    blank_page_directory(dir);

    dir[1023] = (uint32_t)dir | 3;

    qemu_log("Blanked directory.");

    qemu_log("================ Page directory is ready.");

    return dir;
}
void blank_page_directory(uint32_t* pagedir_addr) {
    for (size_t i = 0; i < 1024; i++) {
        pagedir_addr[i] = 0;  // assumed default: entry cleared, i.e. not present
    }
}
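// Before paging is enabled, the physical address stored in a directory entry
// can be dereferenced directly; afterwards the page tables are only reachable
// through the recursive-mapping window starting at page_directory_start, one
// page per directory index.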
uint32_t* get_page_table_by_vaddr(uint32_t* page_dir, virtual_addr_t vaddr) {
    if(paging_initialized)
        return (uint32_t*)((char*)page_directory_start + (PD_INDEX(vaddr) * PAGE_SIZE));

    return (uint32_t*)(page_dir[PD_INDEX(vaddr)] & ~0xfff);
}
    __asm__ volatile(
        "mov %cr3, %eax\n"
void map_single_page(physical_addr_t* page_dir, physical_addr_t physical, virtual_addr_t virtual, uint32_t flags) {
    uint32_t pdi = PD_INDEX(virtual);
    uint32_t pti = PT_INDEX(virtual);

    uint32_t* pt = 0;

    if((page_dir[pdi] & 1) == 0) {
        // No page table covers this region yet: allocate one and install it.
        pt = (uint32_t *)phys_alloc_single_page();

        page_dir[pdi] = (uint32_t)pt | PAGE_WRITEABLE | PAGE_PRESENT;

        if(paging_initialized && page_dir == get_kernel_page_directory()) {
            // With paging enabled the new table is only reachable through the
            // recursive-mapping window, so clear and use it from there.
            uint32_t pt_addr = (uint32_t)page_directory_start + (pdi * PAGE_SIZE);

            memset((uint32_t*)pt_addr, 0, PAGE_SIZE);

            pt = (uint32_t*)pt_addr;
        } else if(paging_initialized && page_dir != get_kernel_page_directory()) {
            qemu_warn("FIXME: Mapping other page directories");
        }
    } else {
        pt = get_page_table_by_vaddr(page_dir, virtual);
    }

    pt[pti] = physical | flags | PAGE_PRESENT;

    __asm__ volatile ("invlpg (,%0,)"::"a"(virtual));
}
void unmap_single_page(uint32_t* page_dir, virtual_addr_t virtual) {
    if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
        // Nothing is mapped here.
        return;
    }

    uint32_t* pt = get_page_table_by_vaddr(page_dir, virtual);

    pt[PT_INDEX(virtual)] = 0;
}
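/*
 * Minimal usage sketch for the allocation/mapping pair above (the virtual
 * address SOME_FREE_VADDR is a hypothetical placeholder, not a symbol from
 * this kernel):
 *
 *     physical_addr_t frame = phys_alloc_single_page();
 *     map_single_page(get_kernel_page_directory(), frame, SOME_FREE_VADDR, PAGE_WRITEABLE);
 *     // ... use the mapping ...
 *     unmap_single_page(get_kernel_page_directory(), SOME_FREE_VADDR);
 *     phys_free_single_page(frame);
 */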
uint32_t phys_get_page_data(uint32_t* page_dir, virtual_addr_t virtual) {
    if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
        return 0;
    }

    uint32_t* pt = get_page_table_by_vaddr(page_dir, virtual);

    return pt[PT_INDEX(virtual)];
}
uint32_t virt2phys(const uint32_t *page_dir, virtual_addr_t virtual) {
    if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
        return 0;
    }

    uint32_t* pt = get_page_table_by_vaddr((uint32_t*)page_dir, virtual);

    // The frame address occupies bits 12-31 of the entry; mask off the flag bits.
    return pt[PT_INDEX(virtual)] & ~0xfff;
}
void phys_set_flags(uint32_t* page_dir, virtual_addr_t virtual, uint32_t flags) {
    if((page_dir[PD_INDEX(virtual)] & 1) == 0) {
        return;
    }

    uint32_t* pt = get_page_table_by_vaddr(page_dir, virtual);

    pt[PT_INDEX(virtual)] = (pt[PT_INDEX(virtual)] & ~0x3ff) | flags | PAGE_PRESENT;
}
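// map_pages() maps a physically contiguous range one page at a time via
// map_single_page(); the end address is rounded up with ALIGN so that a
// partially covered last page is still mapped.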
void map_pages(uint32_t* page_dir, physical_addr_t physical, virtual_addr_t virtual,
               size_t size, uint32_t flags) {
    physical_addr_t phys = physical;
    physical_addr_t virt = virtual;

    virtual_addr_t vend = ALIGN(virt + size, PAGE_SIZE);

    while(virt < vend) {
        map_single_page(page_dir, phys, virt, flags);

        phys += PAGE_SIZE;
        virt += PAGE_SIZE;
    }
}
    mmap_length = length;

    qemu_log("[PMM] Map:");

    for (i = 0; i < n; i++){
        qemu_log("%s [Address: %x | Length: %x] <%d>",
                 (entry.type == 1 ? "Available" : "Reserved"),
                 entry.addr_low, entry.len_low, entry.type);

        phys_memory_size += entry.len_low;
    }

    qemu_log("RAM: %d MB | %d KB | %d B", phys_memory_size/(1024*1024), phys_memory_size/1024, phys_memory_size);
size_t getInstalledRam(){
    return phys_memory_size;
}
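// Second pass over the multiboot memory map: every region whose type is not 1
// ("Available") is walked page by page and marked as used in the bitmap, so
// the allocators never hand out frames from reserved areas.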
    mmap_length = length;

    for (i = 0; i < n; i++){
        size_t addr = entry.addr_low;
        size_t length = entry.len_low;

        if(entry.type != 1) {
            // Reserved region: mark every page it covers as used.
            for(int j = 0; j < length; j += PAGE_SIZE) {
                phys_mark_page_entry(addr + j, 1);
            }
        }
    }

    qemu_log("RAM: %d MB | %d KB | %d B", phys_memory_size/(1024*1024), phys_memory_size/1024, phys_memory_size);
uint32_t* get_kernel_page_directory() {
    if(paging_initialized)
        return page_directory_virt;

    return kernel_page_directory;
}
    extern size_t grub_last_module_end;

    qemu_log("Memory bitmap covers: %d MB", (sizeof(pages_bitmap) * 8) >> 10);

    kernel_start = (size_t)&KERNEL_BASE_pos;
    kernel_end = (size_t)&KERNEL_END_pos;

    qemu_log("MODEND: %x; &MODEND: %x", grub_last_module_end, (size_t)&grub_last_module_end);

    size_t real_end = (size_t)grub_last_module_end;

    size_t kernel_size = real_end - kernel_start;

    qemu_log("Kernel starts at: %x", kernel_start);
    qemu_log("Kernel ends at: %x (only kernel)", kernel_end);
    qemu_log("Kernel ends at: %x (everything)", real_end);

    qemu_log("Kernel size is: %d (%d kB) (%d MB)", (kernel_end - kernel_start), (kernel_end - kernel_start) >> 10, (kernel_end - kernel_start) >> 20);
    qemu_log("Kernel size (initrd included) is: %d (%d kB) (%d MB)", kernel_size, kernel_size >> 10, kernel_size >> 20);

    kernel_size = ALIGN(kernel_size, PAGE_SIZE);

    qemu_log("Allocating %d pages for kernel space...", (real_end / 4096) + 1);

    // Reserve every frame below the end of the kernel and its modules so the
    // allocator never hands them out.
    for(int i = 0; i < (real_end / 4096) + 1; i++) {
        phys_alloc_single_page();
    }

    kernel_page_directory = (physical_addr_t*)new_page_directory();

    qemu_log("New page directory at: %x", kernel_page_directory);

    // Identity-map everything from 0 up to the end of the kernel and modules.
    map_pages(kernel_page_directory, 0, 0, ALIGN(real_end, PAGE_SIZE), PAGE_WRITEABLE);

    qemu_log("Max: %x", phys_memory_size);

    load_page_directory((size_t)kernel_page_directory);

    paging_initialized = true;

    uint32_t* pd = get_kernel_page_directory();

    for(int i = 0; i < 1024; i++) {
        qemu_log("[%d]: %x", i, pd[i]);
    }
void map_pages_overlapping(physical_addr_t* page_directory, size_t physical_start,
                           size_t virtual_start, size_t size, uint32_t flags) {
    size_t nth1 = virtual_start / PAGE_SIZE;
    size_t nth2 = (virtual_start + size) / PAGE_SIZE;

    size_t pages_to_map = (nth2 - nth1) + 1;

    qemu_log("Range: %x - %x", virtual_start, virtual_start + size);
    qemu_note("Mapping %u pages to %x", pages_to_map, physical_start);

    map_pages(page_directory, physical_start, virtual_start, pages_to_map * PAGE_SIZE, flags);
}
void unmap_pages_overlapping(physical_addr_t* page_directory, size_t virtual,
                             size_t size) {
    size_t nth1 = virtual / PAGE_SIZE;
    size_t nth2 = (virtual + size) / PAGE_SIZE;

    size_t pages_to_map = (nth2 - nth1) + 1;

    for(size_t i = 0; i < pages_to_map; i++) {
        unmap_single_page(page_directory, virtual + (i * PAGE_SIZE));
    }
}
void phys_not_enough_memory() {
    qemu_log("Not enough memory!");
}