return(NULL);
}
+void pg_frame_free(void *addr)
+{
+ _frame_clear((u32_t)addr);
+
+ return;
+}
+
+
#if FEATURE(DEBUG)
static void _pg_dir_debug_regions(pg_dir_t *pd)
{
return;
}
+
+static void _pg_dir_debug_arch(pg_dir_t *pd)
+{
+ switch(_pg_flags & PG_MODE_MASK) {
+ case PG_MODE_LEGACY: {
+ struct page_table *dir;
+ u32_t i;
+
+ dir = (struct page_table*)pd->pd_base;
+
+ for(i = 0; i < 1024; i++) {
+ if(!(dir->pt_entries[i] & PAGE_ATTR_PRESENT)) {
+ continue;
+ }
+
+ dbg_printf("cr3[%04u] = 0x%08x\n", i, dir->pt_entries[i]);
+ if(!(dir->pt_entries[i] & PAGE_ATTR_SIZE)) {
+ struct page_table *tbl;
+ u32_t j;
+
+ tbl = (struct page_table*)(dir->pt_entries[i] & 0xfffff000);
+
+ for(j = 0; j < 1024; j++) {
+ if(tbl->pt_entries[j] & PAGE_ATTR_PRESENT) {
+ dbg_printf("cr3[%04u][%04u] = 0x%08x\n", i, j, tbl->pt_entries[j]);
+ }
+ }
+ }
+ }
+
+ break;
+ }
+
+ case PG_MODE_PAE:
+ break;
+ }
+
+ return;
+}
#endif /* FEATURE(DEBUG) */
void* pg_init(struct multiboot_info *info)
return((void*)(cr3 | _pg_flags));
}
+int _pg_dir_kstack_map(struct pagedir *pgdir)
+{
+ int ret_val;
+ void *frame;
+
+ ret_val = -ENOMEM;
+ frame = pg_frame_alloc_end();
+
+ if(ret_val) {
+ u32_t attrs;
+ void* vaddr;
+
+ vaddr = CONFIG_KERNEL_STACK_BASE;
+ attrs = PAGE_ATTR_PRESENT | PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC;
+
+ ret_val = pg_dir_map(pgdir, frame, vaddr, PAGE_SIZE, attrs);
+
+ if(ret_val < 0) {
+ goto cleanup;
+ }
+
+ ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_KSTACK, attrs);
+
+ if(ret_val < 0) {
+ goto cleanup;
+ }
+
+ ret_val = 0;
+ }
+
+cleanup:
+ if(ret_val < 0) {
+ pg_dir_unmap(pgdir, CONFIG_KERNEL_STACK_BASE, PAGE_SIZE);
+
+ if(frame) {
+ pg_frame_free(frame);
+ }
+ }
+
+ return(ret_val);
+}
+
int pg_dir_create(pg_dir_t **dst)
{
int ret_val;
for(reg = _kernel_pgdir.pd_regions; reg; reg = reg->reg_next) {
switch(reg->reg_type) {
case REGION_TEXT:
+ case REGION_BSS:
+ case REGION_DATA:
+ case REGION_RODATA:
+ case REGION_HEAP:
dbg_printf("Mapping region %02x at 0x%08x:%08x\n",
reg->reg_type, reg->reg_base, reg->reg_size);
}
}
+ /* allocate the kernel stack */
+ ret_val = _pg_dir_kstack_map(dir);
+
+#if FEATURE(DEBUG)
+ _pg_dir_debug_regions(dir);
+ _pg_dir_debug_arch(dir);
+#endif /* FEATURE(DEBUG) */
+
if(ret_val) {
/* couldn't create the page dir - free allocations */
kfree(dir);
return(ret_val);
}
-static int _pg_dir_map_legacy(page_table_t *pd, u32_t paddr, u32_t vaddr, u32_t size, const u32_t flags)
+static int _pg_dir_map_legacy(page_table_t *pd, u32_t paddr, u32_t vaddr,
+ u32_t size, const u32_t flags)
{
int ret_val;
return(ret_val);
}
-static int _pg_dir_map_pae(pdpt_t *pd, u64_t paddr, u64_t vaddr, u32_t size, const u32_t flags)
+static int _pg_dir_map_pae(pdpt_t *pd, u64_t paddr, u64_t vaddr,
+ u32_t size, const u32_t flags)
{
int ret_val;
return(ret_val);
}
-int pg_dir_map(pg_dir_t *pd, const void *phys, const void *virt, const u32_t size, const u32_t flags)
+int pg_dir_map(pg_dir_t *pd, const void *phys, const void *virt,
+ const u32_t size, const u32_t flags)
{
int ret_val;
u32_t asize;
switch(_pg_flags & PG_MODE_MASK) {
case PG_MODE_LEGACY:
ret_val = _pg_dir_map_legacy((struct page_table*)pd->pd_base,
- (u32_t)virt, (u32_t)phys,
+ (u32_t)phys, (u32_t)virt,
asize, flags);
break;
case PG_MODE_PAE:
ret_val = _pg_dir_map_pae((struct pdpt*)pd->pd_base,
- (unsigned)virt, (unsigned)phys,
+ (unsigned)phys, (unsigned)virt,
asize, flags);
break;
return(ret_val);
}
+int pg_dir_unmap(pg_dir_t *pd, const void *base, const u32_t size)
+{
+ return(-ENOSYS);
+}
+
static int _pg_dir_add_region(pg_dir_t *pd, void *base, u32_t size, u32_t type, u32_t attrs)
{
struct region *reg;