/*
* This file is part of the Corax operating system.
- * Copyright (C) 2016-2019 Matthias Kruk
+ * Copyright (C) 2016-2020 Matthias Kruk
*
* Corax is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
static struct pagedir _kernel_pgdir;
static const char *_str_pg_mode[] = {
- "legacy IA-32 mode",
- "PAE mode",
- "IA-32e mode",
- "Intel64 mode"
+ "legacy IA-32 mode",
+ "PAE mode",
+ "IA-32e mode",
+ "Intel64 mode"
};
-static int _pg_dir_add_region(pg_dir_t*, void*, u32_t, u32_t, u32_t, u32_t);
+static int _pg_dir_add_region(pg_dir_t*, void*, u32_t, region_type_t, u32_t, region_opts_t);
int _pg_dir_vpxlate(pg_dir_t*, u32_t, u32_t*);
int _pg_dir_pagesize(pg_dir_t*, u32_t, u32_t*);
int _pg_dir_xfer(pg_dir_t*, void*, pg_dir_t*, void*, u32_t);
static void* _phys_alloc(u32_t size, u32_t align)
{
- extern u32_t _mem_start;
- void *addr;
+ extern u32_t _mem_start;
+ void *addr;
- addr = NULL;
+ addr = NULL;
- if(align) {
- /* check if address needs alignment */
- if(_mem_start & (align - 1)) {
- _mem_start = (_mem_start & ~(align - 1)) + align;
- }
- }
+ if(align) {
+ /* check if address needs alignment */
+ if(_mem_start & (align - 1)) {
+ _mem_start = (_mem_start & ~(align - 1)) + align;
+ }
+ }
- addr = (void*)_mem_start;
- _mem_start += size;
+ addr = (void*)_mem_start;
+ _mem_start += size;
- memset(addr, 0, size);
+ memset(addr, 0, size);
- return(addr);
+ return(addr);
}
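For power-of-two alignments the branch above is the open-coded form of the usual round-up idiom. A minimal equivalent sketch, assuming align is a non-zero power of two (align_up is not a function in this file):

static inline u32_t align_up(u32_t value, u32_t align)
{
        /* adding align-1 carries any misaligned value past the next boundary;
         * masking then snaps it back down onto that boundary */
        return (value + align - 1) & ~(align - 1);
}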
void* pg_frame_alloc_start(void)
/* find the 0 bit in _frame_map[frm] */
for(addr = frm << 17;
- addr < ((frm + 1) << 17);
- addr += PAGE_SIZE) {
+ addr < ((frm + 1) << 17);
+ addr += PAGE_SIZE) {
if(!_frame_get(addr)) {
_frame_set(addr);
return((void*)addr);
u32_t addr;
for(addr = frm << 17;
- addr > ((frm - 1) << 17);
- addr -= PAGE_SIZE) {
+ addr > ((frm - 1) << 17);
+ addr -= PAGE_SIZE) {
if(!_frame_get(addr)) {
_frame_set(addr);
return((void*)addr);
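The shifts by 17 come from the bitmap layout: one u32_t in _frame_map covers 32 page frames of 4 KiB, i.e. 1 << 17 bytes of physical memory. The accessors themselves are not part of these hunks; their likely shape, shown here only as an assumption, is:

/* word index: addr >> 17; bit within the word: address bits 12..16 */
static void _frame_set(u32_t addr)
{
        _frame_map[addr >> 17] |= 1U << ((addr >> 12) & 0x1f);
}

static void _frame_clear(u32_t addr)
{
        _frame_map[addr >> 17] &= ~(1U << ((addr >> 12) & 0x1f));
}

static int _frame_get(u32_t addr)
{
        return (_frame_map[addr >> 17] >> ((addr >> 12) & 0x1f)) & 1;
}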
reg = pd->pd_regions[i];
dbg_printf("Region type %02x at 0x%08x:%08x\n",
- reg->reg_type, reg->reg_base, reg->reg_size);
+ reg->reg_type, reg->reg_base, reg->reg_size);
}
return;
}
dbg_printf("cr3[%01u][%03u][%03u] = 0x%016llx\n",
- i, j, k, ttbl->ppt_entries[k]);
+ i, j, k, ttbl->ppt_entries[k]);
}
}
}
void* pg_init(struct multiboot_info *info)
{
- struct memory_map *mmap;
- u64_t mem_size;
+ struct memory_map *mmap;
+ u64_t mem_size;
u64_t i;
- u32_t cr3;
+ u32_t cr3;
- _mem_start = (u32_t)&_mem_start + sizeof(u32_t);
- mmap = (struct memory_map*)info->mmap_addr;
- mem_size = 0;
+ _mem_start = (u32_t)&_mem_start + sizeof(u32_t);
+ mmap = (struct memory_map*)info->mmap_addr;
+ mem_size = 0;
cr3 = 0;
/*
*/
for(mmap = (struct memory_map*)info->mmap_addr;
- (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
- mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
+ (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+ mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
u64_t limit;
if(mmap->type != MEM_AVAILABLE) {
dbg_printf("Allocating %lluKB for the frame set\n", (mem_size >> 15) / 1024);
#endif /* FEATURE(DEBUG) */
- /* FIXME: memory size is not good evidence of PAE capability */
- if(mem_size < 0x100000000L) {
- /* we have less than 4G of memory, no need for PAE */
- _pg_flags = PG_MODE_LEGACY;
- } else {
- /* TODO: check if IA-32e paging is supported */
- _pg_flags = PG_MODE_PAE;
- }
+ /* FIXME: memory size is not good evidence of PAE capability */
+ if(mem_size < 0x100000000L) {
+ /* we have less than 4G of memory, no need for PAE */
+ _pg_flags = PG_MODE_LEGACY;
+ } else {
+ /* TODO: check if IA-32e paging is supported */
+ _pg_flags = PG_MODE_PAE;
+ }
- _nframes = mem_size >> 17; /* 12 for the page size and 5 for the number of frames per u32_t */
- _frame_map = _phys_alloc(_nframes << 2, 4);
+ _nframes = mem_size >> 17; /* 12 for the page size and 5 for the number of frames per u32_t */
+ _frame_map = _phys_alloc(_nframes << 2, 4);
/* first mark all frames as used - we will later clear those that are actually available */
for(i = 0; i < mem_size; i += PAGE_SIZE) {
- _frame_set(i);
- }
+ _frame_set(i);
+ }
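To put the sizing into numbers: with mem_size = 4 GiB there are 2^20 page frames, so _nframes = mem_size >> 17 = 2^15 words, and _phys_alloc(_nframes << 2, 4) reserves 2^17 bytes = 128 KiB for the map, matching the mem_size >> 15 figure used in the debug output above.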
/* allocate the proper page directory type for the kernel */
- switch(_pg_flags & PG_MODE_MASK) {
+ switch(_pg_flags & PG_MODE_MASK) {
case PG_MODE_LEGACY:
- cr3 = (u32_t)_phys_alloc(sizeof(page_table_t), PAGE_ALIGN);
- break;
+ cr3 = (u32_t)_phys_alloc(sizeof(page_table_t), PAGE_ALIGN);
+ break;
case PG_MODE_PAE:
- cr3 = (u32_t)_phys_alloc(sizeof(pdpt_t), PDPT_ALIGN);
- break;
+ cr3 = (u32_t)_phys_alloc(sizeof(pdpt_t), PDPT_ALIGN);
+ break;
case PG_MODE_INTEL64:
case PG_MODE_IA32E:
- PANIC("IA-32e mode paging not yet supported\n");
- }
+ PANIC("IA-32e mode paging not yet supported\n");
+ }
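The alignment arguments mirror what the hardware expects of the structure CR3 will point at: a legacy page directory must be 4 KiB aligned, while the PAE page-directory-pointer table is only 32 bytes long and needs 32-byte alignment. PAGE_ALIGN and PDPT_ALIGN are not defined in these hunks; presumably they boil down to:

#define PAGE_ALIGN 0x1000  /* assumed: 4 KiB, legacy page directory alignment */
#define PDPT_ALIGN 0x20    /* assumed: 32 bytes, PAE PDPT alignment */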
for(mmap = (struct memory_map*)info->mmap_addr;
- (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
- mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
- u32_t attrs;
- u64_t addr;
+ (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+ mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
+ u32_t attrs;
+ u64_t addr;
#if FEATURE(DEBUG)
- dbg_printf("Region: 0x%016llx - 0x%016llx [%u]\n",
- mmap->addr, mmap->addr + mmap->len, mmap->type);
+ dbg_printf("Region: 0x%016llx - 0x%016llx [%u]\n",
+ mmap->addr, mmap->addr + mmap->len, mmap->type);
#endif /* FEATURE(DEBUG) */
/* FIXME: Memory in the region 0x100000:&_mem_start should NOT be writable! */
- attrs = PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
+ attrs = PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
- /* disable caching on reserved memory areas */
- if(mmap->type != MEM_AVAILABLE) {
- attrs |= PAGE_ATTR_WRITE_THRU | PAGE_ATTR_CACHE_DISABLE;
- /* frames in this region are already marked as used */
- } else {
+ /* disable caching on reserved memory areas */
+ if(mmap->type != MEM_AVAILABLE) {
+ attrs |= PAGE_ATTR_WRITE_THRU | PAGE_ATTR_CACHE_DISABLE;
+ /* frames in this region are already marked as used */
+ } else {
/* mark these frames as not-in-use */
for(addr = mmap->addr; addr < (mmap->addr + mmap->len); addr += PAGE_SIZE) {
- _frame_clear(addr);
- }
+ _frame_clear(addr);
+ }
}
- switch(_pg_flags & PG_MODE_MASK) {
- case PG_MODE_LEGACY: {
- page_table_t *pd;
+ switch(_pg_flags & PG_MODE_MASK) {
+ case PG_MODE_LEGACY: {
+ page_table_t *pd;
- pd = (page_table_t*)cr3;
+ pd = (page_table_t*)cr3;
- for(addr = mmap->addr & ~(PAGE_SIZE_LARGE - 1);
- addr < (mmap->addr + mmap->len);
- addr += PAGE_SIZE_LARGE) {
- pd->pt_entries[addr >> 22] = (u32_t)addr | attrs;
- }
+ for(addr = mmap->addr & ~(PAGE_SIZE_LARGE - 1);
+ addr < (mmap->addr + mmap->len);
+ addr += PAGE_SIZE_LARGE) {
+ pd->pt_entries[addr >> 22] = (u32_t)addr | attrs;
+ }
- break;
- }
+ break;
+ }
- case PG_MODE_PAE: {
- pdpt_t *pd;
+ case PG_MODE_PAE: {
+ pdpt_t *pd;
- pd = (pdpt_t*)cr3;
+ pd = (pdpt_t*)cr3;
- /* the way PAE works we can access 64G of physical memory, but we can still
- * only address 4G at a time, i.e. in each page directory. So stop at 4G. */
- for(addr = mmap->addr & ~(PAGE_SIZE_BIG - 1);
- addr < (mmap->addr + mmap->len) && addr < 0x100000000LL;
- addr += PAGE_SIZE_BIG) {
- pae_page_table_t *pt;
+ /* the way PAE works we can access 64G of physical memory, but we can still
+ * only address 4G at a time, i.e. in each page directory. So stop at 4G. */
+ for(addr = mmap->addr & ~(PAGE_SIZE_BIG - 1);
+ addr < (mmap->addr + mmap->len) && addr < 0x100000000LL;
+ addr += PAGE_SIZE_BIG) {
+ pae_page_table_t *pt;
- if(!pd->pdpt_entries[(addr >> 30) & 0x3]) {
- pt = (pae_page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
- pd->pdpt_entries[(addr >> 30) & 0x3] = (u32_t)pt | PAGE_ATTR_PRESENT;
- } else {
- /* FIXME: I'm pretty sure this will break on Intel64 */
- pt = (pae_page_table_t*)((u32_t)pd->pdpt_entries[(addr >> 30) & 0x3] ^ 0x1);
- }
+ if(!pd->pdpt_entries[(addr >> 30) & 0x3]) {
+ pt = (pae_page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
+ pd->pdpt_entries[(addr >> 30) & 0x3] = (u32_t)pt | PAGE_ATTR_PRESENT;
+ } else {
+ /* FIXME: I'm pretty sure this will break on Intel64 */
+ pt = (pae_page_table_t*)((u32_t)pd->pdpt_entries[(addr >> 30) & 0x3] ^ 0x1);
+ }
- pt->ppt_entries[(addr >> 21) & 0x1ff] = addr | attrs;
- }
- break;
- }
+ pt->ppt_entries[(addr >> 21) & 0x1ff] = addr | attrs;
+ }
+ break;
+ }
- case PG_MODE_INTEL64:
- case PG_MODE_IA32E:
- PANIC("How did I get here?");
- }
- }
+ case PG_MODE_INTEL64:
+ case PG_MODE_IA32E:
+ PANIC("How did I get here?");
+ }
+ }
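The index arithmetic in both branches falls out of the large-page sizes: a legacy PSE directory has 1024 entries of 4 MiB each, so bits 31:22 select the entry, while each of the four PAE PDPT slots covers 1 GiB through 512 entries of 2 MiB, so bits 31:30 pick the slot and bits 29:21 the entry. A small illustration of that decomposition (not code from this file):

static void pae_split(u64_t addr, u32_t *pdpte_idx, u32_t *pde_idx)
{
        *pdpte_idx = (u32_t)((addr >> 30) & 0x3);   /* which 1 GiB slot  */
        *pde_idx   = (u32_t)((addr >> 21) & 0x1ff); /* which 2 MiB entry */
}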
/* initialize the heap, since we'll need it now */
heap_init((void*)_mem_start, CONFIG_KERNEL_HEAP_SIZE);
_kernel_pgdir.pd_base = (void*)cr3;
_pg_dir_add_region(&_kernel_pgdir, TEXT_BASE, TEXT_SIZE,
- REGION_TEXT, 0,
- REGION_KERNEL | REGION_SHARED);
+ REGION_TYPE_TEXT, 0,
+ REGION_OPT_KERNEL | REGION_OPT_SHARED);
_pg_dir_add_region(&_kernel_pgdir, RODATA_BASE, RODATA_SIZE,
- REGION_RODATA, PAGE_ATTR_NO_EXEC,
- REGION_KERNEL | REGION_SHARED);
+ REGION_TYPE_RODATA, PAGE_ATTR_NO_EXEC,
+ REGION_OPT_KERNEL | REGION_OPT_SHARED);
_pg_dir_add_region(&_kernel_pgdir, DATA_BASE, DATA_SIZE,
- REGION_DATA, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
- REGION_KERNEL | REGION_SHARED);
+ REGION_TYPE_DATA, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
+ REGION_OPT_KERNEL | REGION_OPT_SHARED);
_pg_dir_add_region(&_kernel_pgdir, BSS_BASE, BSS_SIZE,
- REGION_BSS, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
- REGION_KERNEL | REGION_SHARED);
+ REGION_TYPE_BSS, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
+ REGION_OPT_KERNEL | REGION_OPT_SHARED);
/* heap region also includes allocations from _phys_alloc() */
_pg_dir_add_region(&_kernel_pgdir, &_mem_start,
- _mem_start - (u32_t)&_mem_start + CONFIG_KERNEL_HEAP_SIZE,
- REGION_HEAP, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
- REGION_KERNEL | REGION_SHARED);
+ _mem_start - (u32_t)&_mem_start + CONFIG_KERNEL_HEAP_SIZE,
+ REGION_TYPE_HEAP, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
+ REGION_OPT_KERNEL | REGION_OPT_SHARED);
- /* mark all page frames from 0x0 to the end of the kernel heap as used */
- for(i = 0; i < _mem_start + CONFIG_KERNEL_HEAP_SIZE; i += PAGE_SIZE) {
- _frame_set(i);
- }
+ /* mark all page frames from 0x0 to the end of the kernel heap as used */
+ for(i = 0; i < _mem_start + CONFIG_KERNEL_HEAP_SIZE; i += PAGE_SIZE) {
+ _frame_set(i);
+ }
#if FEATURE(DEBUG)
_pg_dir_debug_regions(&_kernel_pgdir);
#endif /* FEATURE(DEBUG) */
- _kernel_cr3 = cr3;
+ _kernel_cr3 = cr3;
- dbg_printf("Enabling %s paging\n", _str_pg_mode[_pg_flags & PG_MODE_MASK]);
+ dbg_printf("Enabling %s paging\n", _str_pg_mode[_pg_flags & PG_MODE_MASK]);
- return((void*)(cr3 | _pg_flags));
+ return((void*)(cr3 | _pg_flags));
}
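Since the directory behind cr3 is at least 32-byte aligned, its low bits are free to carry the paging mode back to the caller. Presumably the early boot code splits the value again before loading CR3, roughly along these lines (illustrative only):

u32_t packed = (u32_t)pg_init(info);
u32_t mode   = packed & PG_MODE_MASK;    /* assumed to fit in the free low bits */
u32_t cr3pa  = packed & ~PG_MODE_MASK;   /* physical address to load into CR3   */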
void* pg_dir_get_kstack(struct pagedir *pgdir)
for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
reg = pgdir->pd_regions[i];
- if(reg->reg_type == REGION_KSTACK) {
+ if(reg->reg_type == REGION_TYPE_KSTACK) {
u32_t virt;
u32_t phys;
for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
reg = pgdir->pd_regions[i];
- if(reg->reg_type == REGION_STACK) {
+ if(reg->reg_type == REGION_TYPE_STACK) {
ret_val = reg->reg_base;
break;
}
for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
reg = pgdir->pd_regions[i];
- if(reg->reg_type == REGION_STACK) {
+ if(reg->reg_type == REGION_TYPE_STACK) {
ret_val = (char*)reg->reg_base + reg->reg_size;
break;
}
int i;
for(stack = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
- if(pgdir->pd_regions[i]->reg_type == REGION_STACK) {
+ if(pgdir->pd_regions[i]->reg_type == REGION_TYPE_STACK) {
stack = pgdir->pd_regions[i];
break;
}
(unsigned long)new_aligned;
ret_val = pg_dir_map(pgdir, 0, new_aligned, map_size,
- stack->reg_attrs);
+ stack->reg_attrs);
if(!ret_val) {
stack->reg_base = new_aligned;
}
int pg_dir_memcpy(struct pagedir *ddir, void *dvaddr,
- struct pagedir *sdir, void *svaddr, u32_t bytes)
+ struct pagedir *sdir, void *svaddr, u32_t bytes)
{
int ret_val;
}
int _pg_dir_xfer(struct pagedir *ddir, void *dvaddr,
- struct pagedir *sdir, void *svaddr, u32_t bytes)
+ struct pagedir *sdir, void *svaddr, u32_t bytes)
{
void *dpaddr;
void *spaddr;
goto cleanup;
}
- ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_KSTACK, attrs,
- REGION_PRIV);
+ ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_TYPE_KSTACK, attrs,
+ REGION_OPT_PRIV);
if(ret_val < 0) {
dbg_printf("_pg_dir_add_region() failed with error %u\n", -ret_val);
goto cleanup;
}
- ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_STACK, attrs,
- REGION_PRIV);
+ ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_TYPE_STACK, attrs,
+ REGION_OPT_PRIV);
if(ret_val < 0) {
goto cleanup;
cleanup:
if(ret_val < 0) {
pg_dir_unmap(pgdir, (void*)((u32_t)CONFIG_KERNEL_STACK_BASE - PAGE_SIZE),
- PAGE_SIZE);
+ PAGE_SIZE);
if(frame) {
pg_frame_free(frame);
reg = pd->pd_regions[i];
switch(reg->reg_type) {
- case REGION_HEAP:
+ case REGION_TYPE_HEAP:
ret_val = (u32_t)reg->reg_base;
i = CONFIG_PAGING_DIR_MAXREGIONS;
break;
- case REGION_MMAP:
- case REGION_TEXT:
- case REGION_BSS:
- case REGION_DATA:
- case REGION_RODATA:
+ case REGION_TYPE_MMAP:
+ case REGION_TYPE_TEXT:
+ case REGION_TYPE_BSS:
+ case REGION_TYPE_DATA:
+ case REGION_TYPE_RODATA:
limit = ALIGN((u32_t)reg->reg_base + reg->reg_size,
- reg->reg_pgsize);
+ reg->reg_pgsize);
if(limit > ret_val) {
- ret_val = limit;
+ ret_val = limit;
}
break;
default:
- case REGION_STACK:
- case REGION_KSTACK:
+ case REGION_TYPE_STACK:
+ case REGION_TYPE_KSTACK:
break;
}
}
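ALIGN() is not defined in these hunks; from its use here it has to round its first argument up to the next multiple of the second, i.e. the same idiom sketched for _phys_alloc above. Presumably something close to:

#define ALIGN(addr, size) (((addr) + (size) - 1) & ~((size) - 1))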
/* FIXME: Lock the page directory */
for(i = 0, heap_start = 0, ret_val = 0;
- i < CONFIG_PAGING_DIR_MAXREGIONS && ret_val > 0;
- i++) {
+ i < CONFIG_PAGING_DIR_MAXREGIONS && ret_val > 0;
+ i++) {
struct region *reg;
u32_t limit;
reg = pd->pd_regions[i];
switch(reg->reg_type) {
- case REGION_HEAP:
+ case REGION_TYPE_HEAP:
/* the page directory apparently already contains a heap */
ret_val = -EALREADY;
break;
- case REGION_STACK:
- case REGION_KSTACK:
+ case REGION_TYPE_STACK:
+ case REGION_TYPE_KSTACK:
/*
* Ignore stack segments in the calculation because they are supposed to
* be at the end of the address space, and not in front of the heap.
*/
break;
- case REGION_MMAP:
- case REGION_TEXT:
- case REGION_BSS:
- case REGION_DATA:
- case REGION_RODATA:
+ case REGION_TYPE_MMAP:
+ case REGION_TYPE_TEXT:
+ case REGION_TYPE_BSS:
+ case REGION_TYPE_DATA:
+ case REGION_TYPE_RODATA:
/*
* Calculate the address of the first byte after this region. It *should*
* already be aligned, but nevertheless align it again. Just in case.
*/
limit = ALIGN((u32_t)reg->reg_base + reg->reg_size,
- reg->reg_pgsize);
+ reg->reg_pgsize);
if(limit > heap_start) {
heap_start = limit;
/* allocate page frames from wherever */
ret_val = pg_dir_map(pd, NULL, (void*)heap_start,
- heap_size, attrs);
+ heap_size, attrs);
if(!ret_val) {
/* add a struct region so the memory is accounted for */
ret_val = _pg_dir_add_region(pd, (void*)heap_start, heap_size,
- REGION_HEAP, attrs, REGION_PRIV);
+ REGION_TYPE_HEAP, attrs, REGION_OPT_PRIV);
if(ret_val < 0) {
/* oops - ran out of kernel heap space */
dir = (pg_dir_t*)data;
switch(reg->reg_type) {
- case REGION_TEXT:
- case REGION_BSS:
- case REGION_DATA:
- case REGION_RODATA:
+ case REGION_TYPE_TEXT:
+ case REGION_TYPE_BSS:
+ case REGION_TYPE_DATA:
+ case REGION_TYPE_RODATA:
ret_val = pg_dir_map_region(dir, kdir, reg);
break;
break;
#if 0
- case REGION_DATA:
+ case REGION_TYPE_DATA:
ret_val = pg_dir_clone_region(dir, kdir, reg);
break;
- case REGION_TEXT:
- case REGION_RODATA:
- case REGION_HEAP:
+ case REGION_TYPE_TEXT:
+ case REGION_TYPE_RODATA:
+ case REGION_TYPE_HEAP:
/*
* The kernel's .text and .rodata region are mapped directly into the page
* directory since they cannot be modified or removed anyways.
*/
- case REGION_BSS:
+ case REGION_TYPE_BSS:
/*
* The way the interrupt handling is currently implemented, accesses to the
* _cpu structure and _kernel_cr3 are made within the context (i.e. using
*/
#if FEATURE(DEBUG)
dbg_printf("Mapping region %02x at 0x%08x:%08x (ATTR=%x)\n",
- reg->reg_type, reg->reg_base, reg->reg_size, attrs);
+ reg->reg_type, reg->reg_base, reg->reg_size, attrs);
#endif /* FEATURE(DEBUG) */
ret_val = pg_dir_map_region(dir, kdir, reg);
break;
#if 0
- case REGION_BSS:
+ case REGION_TYPE_BSS:
/*
* The .bss section contains the _kernel_cr3 symbol, which is necessary
* for the interrupt handling code to be able to switch to the kernel
* page directory. Alternatively, interrupt handlers could also turn
* off paging to access _kernel_cr3, though...
*/
- case REGION_DATA:
+ case REGION_TYPE_DATA:
/*
* FIXME: Duplicate the parent's .bss and .data, not the kernel's
*
if(ret_val >= 0) {
ret_val = _pg_dir_add_region(dir, reg->reg_base, reg->reg_size,
- reg->reg_type, attrs, reg->reg_flags);
+ reg->reg_type, attrs, reg->reg_opts);
if(ret_val >= 0) {
/* copy the contents of the pages */
pg_dir_memcpy(dir, reg->reg_base, &_kernel_pgdir, reg->reg_base,
- reg->reg_size);
+ reg->reg_size);
}
}
#if 0
/* map the vesa memory into the pagedir */
pg_dir_map(dir, (void*)0xb8000, (void*)0xb8000, 0x2000,
- PAGE_ATTR_PRESENT | PAGE_ATTR_WRITABLE);
+ PAGE_ATTR_PRESENT | PAGE_ATTR_WRITABLE);
#endif /* 0 */
/* allocate the kernel stack */
}
static int _pg_dir_map_legacy(page_table_t *pd, u32_t paddr, u32_t vaddr,
- u32_t size, const u32_t flags)
+ u32_t size, const u32_t flags)
{
int ret_val;
}
static int _pg_dir_map_pae(pdpt_t *pd, u64_t paddr, u64_t vaddr,
- u32_t size, const u32_t flags)
+ u32_t size, const u32_t flags)
{
int ret_val;
}
int pg_dir_map(pg_dir_t *pd, const void *phys, const void *virt,
- const u32_t size, const u32_t flags)
+ const u32_t size, const u32_t flags)
{
int ret_val;
u32_t asize;
switch(_pg_flags & PG_MODE_MASK) {
case PG_MODE_LEGACY:
ret_val = _pg_dir_map_legacy((struct page_table*)pd->pd_base,
- (u32_t)phys, (u32_t)virt,
- asize, flags);
+ (u32_t)phys, (u32_t)virt,
+ asize, flags);
break;
case PG_MODE_PAE:
ret_val = _pg_dir_map_pae((struct pdpt*)pd->pd_base,
- (unsigned)phys, (unsigned)virt,
- asize, flags);
+ (unsigned)phys, (unsigned)virt,
+ asize, flags);
break;
case PG_MODE_IA32E:
goto gtfo;
}
- if(reg->reg_flags & REGION_KERNEL) {
+ if(reg->reg_opts & REGION_OPT_KERNEL) {
/*
* Special case: kernel regions
*
* user-space.
*/
ret_val = pg_dir_map(dpd, reg->reg_base, reg->reg_base, reg->reg_size,
- (reg->reg_attrs | PAGE_ATTR_USER) & ~PAGE_ATTR_WRITABLE);
+ (reg->reg_attrs | PAGE_ATTR_USER) & ~PAGE_ATTR_WRITABLE);
if(ret_val < 0) {
goto gtfo;
}
} else {
for(vaddr = (u32_t)reg->reg_base;
- vaddr < ((u32_t)reg->reg_base + reg->reg_size);
- vaddr += reg->reg_pgsize) {
+ vaddr < ((u32_t)reg->reg_base + reg->reg_size);
+ vaddr += reg->reg_pgsize) {
u32_t paddr;
/* since the pages may not be contiguous in memory, map each page separately */
}
ret_val = pg_dir_map(dpd, (void*)paddr, (void*)vaddr,
- reg->reg_size, reg->reg_attrs);
+ reg->reg_size, reg->reg_attrs);
if(ret_val < 0) {
break;
pg_dir_unmap(dpd, reg->reg_base, reg->reg_size);
} else {
/* mark region as shared and increase its refcount */
- reg->reg_flags |= REGION_SHARED;
+ reg->reg_opts |= REGION_OPT_SHARED;
reg->reg_refs++;
dpd->pd_regions[idx] = reg;
}
/* attempt to map the region */
ret_val = pg_dir_map(dpd, NULL, reg->reg_base,
- reg->reg_size, reg->reg_attrs | PAGE_ATTR_USER);
+ reg->reg_size, reg->reg_attrs | PAGE_ATTR_USER);
if(ret_val < 0) {
goto gtfo;
/* copy the contents of the region */
ret_val = pg_dir_memcpy(dpd, reg->reg_base,
- spd, reg->reg_base,
- reg->reg_size);
+ spd, reg->reg_base,
+ reg->reg_size);
if(ret_val >= 0) {
/* also keep track of the region */
ret_val = _pg_dir_add_region(dpd, reg->reg_base, reg->reg_size,
- reg->reg_type, reg->reg_attrs, reg->reg_flags | PAGE_ATTR_USER);
+ reg->reg_type, reg->reg_attrs | PAGE_ATTR_USER,
+ reg->reg_opts);
}
if(ret_val < 0) {
}
static int _pg_dir_add_region(pg_dir_t *pd, void *base, u32_t size,
- u32_t type, u32_t attrs, u32_t flags)
+ region_type_t type, u32_t attrs, region_opts_t opts)
{
struct region *reg;
int ret_val;
reg->reg_pgsize = PAGE_SIZE;
reg->reg_type = type;
reg->reg_attrs = attrs;
- reg->reg_flags = flags;
+ reg->reg_opts = opts;
reg->reg_refs = 1;
pd->pd_regions[i] = reg;
int i;
for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
- if(pd->pd_regions[i]->reg_type == REGION_HEAP) {
+ if(pd->pd_regions[i]->reg_type == REGION_TYPE_HEAP) {
ret_val = pd->pd_regions[i]->reg_base;
break;
}
/* special case where the heap is not touched */
if(!increment) {
for(i = 0, ret_val = NULL; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
- if(pd->pd_regions[i]->reg_type == REGION_HEAP) {
+ if(pd->pd_regions[i]->reg_type == REGION_TYPE_HEAP) {
ret_val = pd->pd_regions[i]->reg_base +
pd->pd_regions[i]->reg_size;
break;
}
for(heap = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
- if(pd->pd_regions[i]->reg_type == REGION_HEAP) {
+ if(pd->pd_regions[i]->reg_type == REGION_TYPE_HEAP) {
heap = pd->pd_regions[i];
break;
}
if(pgdir && dst) {
u32_t attrs;
- u32_t rflags;
+ region_opts_t opts;
void* vaddr;
void* paddr;
attrs |= PAGE_ATTR_USER;
}
- rflags = REGION_PRIV;
+ opts = REGION_OPT_PRIV;
if(flags & MAP_PHYS) {
/*
vaddr = addr;
paddr = addr;
- rflags = REGION_SHARED;
+ opts = REGION_OPT_SHARED;
}
/*
if(!ret_val) {
/* also account for the new region */
ret_val = _pg_dir_add_region(pgdir, addr, size,
- REGION_MMAP, 0,
- rflags);
+ REGION_TYPE_MMAP, 0,
+ opts);
/* or unmap if it can't be accounted for */
if(ret_val < 0) {