/*
* This file is part of the Corax operating system.
- * Copyright (C) 2016 Matthias Kruk
+ * Copyright (C) 2016-2019 Matthias Kruk
*
* Corax is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
{
struct memory_map *mmap;
u64_t mem_size;
- u64_t map_end;
- u64_t mapped;
u32_t cr3;
u32_t i;
mem_size = 0;
cr3 = 0;
- /* TODO: get rid of this */
- while((u32_t)mmap < (info->mmap_addr + info->mmap_length)) {
- mem_size += mmap->len;
- mmap = (struct memory_map*)((u32_t)mmap + mmap->size + sizeof(mmap->size));
- }
+ /*
+ * Determine the size of the frame set. We scan the memory map for the end of
+ * the highest MEM_AVAILABLE region, i.e. one past the largest address that is
+ * usable for allocations.
+ */
- /* FIXME: memory size is not good evidence of PAE capability */
+ for(mmap = (struct memory_map*)info->mmap_addr;
+ (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+ mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
+ u64_t limit;
+
+ if(mmap->type != MEM_AVAILABLE) {
+ continue;
+ }
+
+ limit = mmap->addr + mmap->len;
+
+ if(limit > mem_size) {
+ mem_size = limit;
+ }
+ }
+ dbg_printf("Last usable address: 0x%016llx\n", mem_size);
+ dbg_printf("Allocating %lluKB for the frame set\n", (mem_size >> 15) / 1024);
+
+ /* FIXME: memory size is not good evidence of PAE capability */
if(mem_size < 0x100000000L) {
/* we have less than 4G of memory, no need for PAE */
_pg_flags = PG_MODE_LEGACY;
- map_end = mem_size;
} else {
/* TODO: check if IA-32e paging is supported */
_pg_flags = PG_MODE_PAE;
- map_end = 0x100000000L;
}
- mapped = 0;
- /* FIXME: We're including memory holes between regions in the calculation.
- * This WILL not end well */
- _nframes = mem_size >> 17;
+ _nframes = mem_size >> 17; /* 12 for the page size and 5 for the number of frames per u32_t */
_frame_map = _phys_alloc(_nframes << 2, 4);
+ /* first mark all frames as used - we will later clear those that are actually available */
+ for(i = 0; i < mem_size; i += PAGE_SIZE) {
+ _frame_set(i);
+ }
+
+ /* allocate the proper page directory type for the kernel */
switch(_pg_flags & PG_MODE_MASK) {
case PG_MODE_LEGACY:
cr3 = (u32_t)_phys_alloc(sizeof(page_table_t), PAGE_ALIGN);
PANIC("IA-32e mode paging not yet supported\n");
}
- mmap = (struct memory_map*)info->mmap_addr;
- while((u32_t)mmap < (info->mmap_addr + info->mmap_length)) {
+ for(mmap = (struct memory_map*)info->mmap_addr;
+ (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+ mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
u32_t attrs;
u64_t addr;
-#if 0
+#if 1
dbg_printf("Region: 0x%016llx - 0x%016llx [%u]\n",
mmap->addr, mmap->addr + mmap->len, mmap->type);
#endif
/* disable caching on reserved memory areas */
if(mmap->type != MEM_AVAILABLE) {
attrs |= PAGE_ATTR_WRITE_THRU | PAGE_ATTR_CACHE_DISABLE;
+ /* frames in this region are already marked as used */
+ } else {
+ /* mark these frames as not-in-use */
- /* mark page frames in this region as used */
- for(addr = mmap->addr; addr < (mmap->addr + mmap->len); addr += PAGE_SIZE) {
- _frame_set(addr);
+ for(addr = mmap->addr; addr < (mmap->addr + mmap->len); addr += PAGE_SIZE) {
+ _frame_clear(addr);
}
- }
+ }
switch(_pg_flags & PG_MODE_MASK) {
case PG_MODE_LEGACY: {
/* the way PAE works we can access 64G of physical memory, but we can still
* only address 4G at a time, i.e. in each page directory. So stop at 4G. */
- for(addr = mmap->addr & ~(PAGE_SIZE_BIG - 1); addr < (mmap->addr + mmap->len) && addr < 0x100000000LL; addr += PAGE_SIZE_BIG) {
+ for(addr = mmap->addr & ~(PAGE_SIZE_BIG - 1);
+ addr < (mmap->addr + mmap->len) && addr < 0x100000000LL;
+ addr += PAGE_SIZE_BIG) {
pae_page_table_t *pt;
if(!pd->pdpt_entries[(addr >> 30) & 0x3]) {
case PG_MODE_IA32E:
PANIC("How did I get here?");
}
- mmap = (struct memory_map*)((u32_t)mmap + mmap->size + sizeof(mmap->size));
}
- /*
- switch(_pg_flags & PG_MODE_MASK) {
- default:
- _pg_flags = PG_MODE_LEGACY;
- case PG_MODE_LEGACY: {
- page_table_t *pt;
-
- pt = (page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
-
- while(mapped < map_end) {
- pt->pt_entries[mapped >> 22] = (u32_t)mapped | PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
- mapped += PAGE_SIZE_LARGE;
- }
-
- cr3 = (u32_t)pt;
- break;
- }
- case PG_MODE_PAE: {
- pdpt_t *pd;
- pae_page_table_t *pt;
-
- pd = (pdpt_t*)_phys_alloc(sizeof(pdpt_t), PDPT_ALIGN);
- while(mapped < map_end) {
- if(!(mapped & (PAGE_SIZE_HUGE - 1))) {
- pt = (pae_page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
- pd->pdpt_entries[(mapped >> 30) & 0x3] = (u32_t)pt | PAGE_ATTR_PRESENT;
- }
-
- pt->ppt_entries[(mapped >> 21) & 0x1ff] = mapped | PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
- mapped += PAGE_SIZE_BIG;
- }
-
- cr3 = (u32_t)pd;
- break;
- }
- case PG_MODE_INTEL64:
- case PG_MODE_IA32E: {
- PANIC("IA-32e mode paging not yet supported\n");
- break;
- }
- }
- */
/* mark all page frames from 0x0 to the end of the last kernel page dir/table as used */
-
for(i = 0; i < _mem_start; i += PAGE_SIZE) {
_frame_set(i);
}
- /* mark frames in reserved areas from GRUB-provided memory map as used and not-cachable */
- /*
- mmap = (struct memory_map*)info->mmap_addr;
- while((u32_t)mmap < (info->mmap_addr + info->mmap_length)) {
- if(mmap->type != MEM_AVAILABLE) {
- u64_t addr;
-
- for(addr = mmap->addr; addr < (mmap->addr + mmap->len); addr += PAGE_SIZE) {
- _frame_set(addr);
-
- if(!(addr & (PAGE_SIZE_BIG - 1))) {
- switch(_pg_flags & PG_MODE_MASK) {
- case PG_MODE_LEGACY:
- ((page_table_t*)cr3)->pt_entries[addr >> 22] |= PAGE_ATTR_CACHE_DISABLE | PAGE_ATTR_WRITE_THRU;
- break;
- case PG_MODE_PAE: {
- pae_page_table_t *pt;
-
- pt = (pae_page_table_t*)((u32_t)((pdpt_t*)cr3)->pdpt_entries[(addr >> 30) & 0x3] ^ 0x1);
- pt->ppt_entries[(addr >> 21) & 0x1ff] |= PAGE_ATTR_CACHE_DISABLE | PAGE_ATTR_WRITE_THRU;
- break;
- }
- default:
- case PG_MODE_IA32E:
- case PG_MODE_INTEL64:
- PANIC("IA-32e mode paging not yet supported\n");
- }
- }
- }
- }
-
- mmap = (struct memory_map*)((u32_t)mmap + mmap->size + sizeof(mmap->size));
- }
- */
_kernel_cr3 = (u32_t)cr3;
dbg_printf("Enabling %s paging\n", _str_pg_mode[_pg_flags & PG_MODE_MASK]);