return(mem_end);
}
+/*
+ * _pae_identity_map() - Add an identity-mapped memory region to a PAE page directory
+ *
+ * SYNOPSIS
+ * void _pae_identity_map(pdpt_t *pd, u64_t from, u64_t to, page_attrs_t attrs);
+ *
+ * DESCRIPTION
+ * The _pae_identity_map() function creates an identity mapping for the linear memory region
+ * [`from', `to') in the page directory pointed to by `pd'; that is, `to' is the first
+ * address outside of the mapped region. The page attributes of the mapping are set
+ * according to the value passed in `attrs'.
+ * This function will only map pages in the low 4GB of memory.
+ *
+ * RETURN VALUE
+ * void
+ *
+ * ERRORS
+ * This function does not signal any errors.
+ */
+static void _pae_identity_map(pdpt_t *pd, u64_t from, u64_t to, page_attrs_t attrs)
+{
+ u64_t addr;
+
+ /* the way PAE works we can access 64G of physical memory, but we can still
+ * only address 4G at a time, i.e. in each page directory. So stop at 4G. */
+
+	for(addr = from & ~(PAGE_SIZE_BIG - 1);
+	    addr < to && addr < 0x100000000LL;
+	    addr += PAGE_SIZE_BIG) {
+ pae_page_table_t *pt;
+ u64_t idx;
+
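+		/* bits 31:30 of the linear address select one of the four PDPT entries */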
+ idx = (addr >> 30) & 0x3;
+
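+		/* allocate the page table backing this 1GB slot on first use */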
+ if(!pd->pdpt_entries[idx]) {
+ pt = (pae_page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
+ pd->pdpt_entries[idx] = (u32_t)pt | PAGE_ATTR_PRESENT;
+ } else {
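+			/* the present bit is the only attribute set on PDPT entries above,
+			 * so flipping it off recovers the page table address */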
+ /* FIXME: I'm pretty sure this will break on Intel64 */
+ pt = (pae_page_table_t*)((u32_t)pd->pdpt_entries[idx] ^ 0x1);
+ }
+
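+		/* bits 29:21 select one of the 512 entries; each maps one 2MB page */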
+ pt->ppt_entries[(addr >> 21) & 0x1ff] = addr | attrs;
+ }
+
+ return;
+}
+
+/*
+ * _legacy_identity_map() - Add an identity-mapped memory region to an IA-32 page directory
+ *
+ * SYNOPSIS
+ * void _legacy_identity_map(page_table_t *pd, u64_t from, u64_t to, page_attrs_t attrs);
+ *
+ * DESCRIPTION
+ * The _legacy_identity_map() function creates an identity mapping for the linear memory region
+ * [`from', `to') in the page directory pointed to by `pd'; that is, `to' is the first
+ * address outside of the mapped region. The page attributes of the mapping are set
+ * according to the value passed in `attrs'.
+ *
+ * RETURN VALUE
+ * void
+ *
+ * ERRORS
+ * This function does not signal any errors.
+ */
+static void _legacy_identity_map(page_table_t *pd, u64_t from, u64_t to, page_attrs_t attrs)
+{
+ u64_t addr;
+
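+	/* bits 31:22 of the address index the page directory; each entry maps one 4MB page */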
+ for(addr = from & ~(PAGE_SIZE_LARGE - 1); addr < to; addr += PAGE_SIZE_LARGE) {
+ pd->pt_entries[addr >> 22] = (u32_t)addr | attrs;
+ }
+
+ return;
+}
+
+/*
+ * _identity_map() - Identity map the multiboot memory map into a page directory
+ *
+ * SYNOPSIS
+ * void _identity_map(void *cr3, struct multiboot_info *info);
+ *
+ * DESCRIPTION
+ * The _identity_map() function creates an identity mapping for the memory map
+ * provided in the multiboot info pointed to by `info' within the page directory
+ * pointed to by `cr3'. The mapping is created using the largest page size
+ * available in the active paging mode.
+ *
+ * RETURN VALUE
+ * void
+ *
+ * ERRORS
+ * This function does not signal any errors.
+ */
+static void _identity_map(void *cr3, struct multiboot_info *info)
+{
+ struct memory_map *mmap;
+
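+	/* walk the multiboot memory map; an entry's `size' field does not count
+	 * the field itself, which is why sizeof(mmap->size) is added when advancing */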
+ for(mmap = (struct memory_map*)info->mmap_addr;
+ (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+ mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
+ page_attrs_t attrs;
+
+#if FEATURE(DEBUG)
+ dbg_printf("Region: 0x%016llx - 0x%016llx [%u]\n",
+ mmap->addr, mmap->addr + mmap->len, mmap->type);
+#endif /* FEATURE(DEBUG) */
+
+ /* FIXME: Memory in the region 0x100000:&_mem_start should NOT be writable! */
+ attrs = PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
+
+ /* disable caching on reserved memory areas */
+ if(mmap->type != MEM_AVAILABLE) {
+ /* the frames are already marked as in-use, so we'll just leave it at that */
+ attrs |= PAGE_ATTR_WRITE_THRU | PAGE_ATTR_CACHE_DISABLE;
+ }
+
+ switch(_pg_flags & PG_MODE_MASK) {
+ case PG_MODE_LEGACY: {
+ _legacy_identity_map((page_table_t*)cr3, mmap->addr, mmap->addr + mmap->len, attrs);
+ break;
+ }
+
+ case PG_MODE_PAE: {
+ _pae_identity_map((pdpt_t*)cr3, mmap->addr, mmap->addr + mmap->len, attrs);
+ break;
+ }
+
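+		/* should be unreachable: pg_init() panics before building IA-32e page tables */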
+ case PG_MODE_INTEL64:
+ case PG_MODE_IA32E:
+ PANIC("How did I get here?");
+ }
+ }
+
+ return;
+}
+
+/*
+ * _clear_unused_frames() - Mark available memory as unused in the frame map
+ *
+ * SYNOPSIS
+ * void _clear_unused_frames(struct multiboot_info *info);
+ *
+ * DESCRIPTION
+ * The _clear_unused_frames() function will iterate over the memory map from the multiboot
+ * information pointed to by `info', and mark all frames of memory regions that are available
+ * for general use as not-in-use in the frame map.
+ *
+ * RETURN VALUE
+ * void
+ *
+ * ERRORS
+ * This function does not signal any errors.
+ */
+void _clear_unused_frames(struct multiboot_info *info)
+{
+ struct memory_map *mmap;
+
+ for(mmap = (struct memory_map*)info->mmap_addr;
+ (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+ mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
+ u64_t addr;
+
+ if(mmap->type != MEM_AVAILABLE) {
+ continue;
+ }
+
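+		/* mark every PAGE_SIZE frame in this available region as free */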
+ for(addr = mmap->addr; addr < (mmap->addr + mmap->len); addr += PAGE_SIZE) {
+ _frame_clear(addr);
+ }
+ }
+
+ return;
+}
+
/*
* pg_init() - Initialize kernel page directory
*
*/
void* pg_init(struct multiboot_info *info)
{
- struct memory_map *mmap;
u64_t mem_size;
u64_t i;
u32_t cr3;
_mem_start = (u32_t)&_mem_start + sizeof(u32_t);
- mmap = (struct memory_map*)info->mmap_addr;
mem_size = 0;
cr3 = 0;
dbg_printf("Allocating %lluKB for the frame set\n", (mem_size >> 15) / 1024);
#endif /* FEATURE(DEBUG) */
- /* FIXME: memory size is not good evidence of PAE capability */
+ /*
+ * Figure out if we should be using PAE paging or not. Besides looking at the amount
+ * of available memory, the kernel should probably also see what cpuid has to say about
+ * PAE availability. It's not unreasonable to expect PAE to be supported though, since
+ * it has been around since the Pentium Pro (with the exception of some Pentium Ms).
+ */
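+	/*
+	 * A minimal sketch of such a check (not wired in yet): leaf 1 of cpuid
+	 * reports PAE support in bit 6 of EDX, so something like this would do:
+	 *
+	 *	u32_t eax, ebx, ecx, edx;
+	 *
+	 *	__asm__ volatile("cpuid"
+	 *	                 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+	 *	                 : "a"(1));
+	 *	if(!(edx & (1 << 6))) {
+	 *		_pg_flags = PG_MODE_LEGACY;	// no PAE, stay in legacy mode
+	 *	}
+	 */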
 	if(mem_size < 0x100000000L) {
 		/* we have less than 4G of memory, no need for PAE */
 		_pg_flags = PG_MODE_LEGACY;
 	} else {
 		_pg_flags = PG_MODE_PAE;
 	}
- _nframes = mem_size >> 17; /* 12 for the page size and 5 for the number of frames per u32_t */
+	/*
+	 * Allocate linear memory for the frame map. The size of the frame map is determined by
+	 * dividing the memory size by the smallest possible page size (4096) and then by the
+	 * number of bits in a u32_t. I reckon it's not really necessary to do a shift instead
+	 * of a division here, since this code only runs once and the compiler is going to
+	 * optimize it anyway, but oh well.
+	 */
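+	/* e.g. with 4GB of memory: 0x100000000 >> 17 = 32768 u32_t's, i.e. a 128KB frame map */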
+ _nframes = mem_size >> 17;
_frame_map = _phys_alloc(_nframes << 2, 4);
/* first mark all frames as used - we will later clear those that are actually available */
PANIC("IA-32e mode paging not yet supported\n");
}
- for(mmap = (struct memory_map*)info->mmap_addr;
- (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
- mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
- page_attrs_t attrs;
- u64_t addr;
-
-#if FEATURE(DEBUG)
- dbg_printf("Region: 0x%016llx - 0x%016llx [%u]\n",
- mmap->addr, mmap->addr + mmap->len, mmap->type);
-#endif /* FEATURE(DEBUG) */
-
- /* FIXME: Memory in the region 0x100000:&_mem_start should NOT be writable! */
- attrs = PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
-
- /* disable caching on reserved memory areas */
- if(mmap->type != MEM_AVAILABLE) {
- attrs |= PAGE_ATTR_WRITE_THRU | PAGE_ATTR_CACHE_DISABLE;
- /* frames in this region are already marked as used */
- } else {
- /* mark these frames as not-in-use */
-
- for(addr = mmap->addr; addr < (mmap->addr + mmap->len); addr += PAGE_SIZE) {
- _frame_clear(addr);
- }
- }
-
- switch(_pg_flags & PG_MODE_MASK) {
- case PG_MODE_LEGACY: {
- page_table_t *pd;
-
- pd = (page_table_t*)cr3;
-
- for(addr = mmap->addr & ~(PAGE_SIZE_LARGE - 1);
- addr < (mmap->addr + mmap->len);
- addr += PAGE_SIZE_LARGE) {
- pd->pt_entries[addr >> 22] = (u32_t)addr | attrs;
- }
-
- break;
- }
-
- case PG_MODE_PAE: {
- pdpt_t *pd;
-
- pd = (pdpt_t*)cr3;
-
- /* the way PAE works we can access 64G of physical memory, but we can still
- * only address 4G at a time, i.e. in each page directory. So stop at 4G. */
- for(addr = mmap->addr & ~(PAGE_SIZE_BIG - 1);
- addr < (mmap->addr + mmap->len) && addr < 0x100000000LL;
- addr += PAGE_SIZE_BIG) {
- pae_page_table_t *pt;
-
- if(!pd->pdpt_entries[(addr >> 30) & 0x3]) {
- pt = (pae_page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
- pd->pdpt_entries[(addr >> 30) & 0x3] = (u32_t)pt | PAGE_ATTR_PRESENT;
- } else {
- /* FIXME: I'm pretty sure this will break on Intel64 */
- pt = (pae_page_table_t*)((u32_t)pd->pdpt_entries[(addr >> 30) & 0x3] ^ 0x1);
- }
-
- pt->ppt_entries[(addr >> 21) & 0x1ff] = addr | attrs;
- }
- break;
- }
-
- case PG_MODE_INTEL64:
- case PG_MODE_IA32E:
- PANIC("How did I get here?");
- }
- }
+ _identity_map((void*)cr3, info);
+ _clear_unused_frames(info);
/* initialize the heap, since we'll need it now */
heap_init((void*)_mem_start, CONFIG_KERNEL_HEAP_SIZE);