git.corax.cc Git - corax/commitdiff
kernel/arch: Add region_type_t and region_opts_t types to describe the type and optio...
authorMatthias Kruk <m@m10k.eu>
Fri, 31 Jul 2020 16:22:32 +0000 (01:22 +0900)
committerMatthias Kruk <m@m10k.eu>
Fri, 31 Jul 2020 16:22:32 +0000 (01:22 +0900)
kernel/arch/paging.c
kernel/core/process.c
kernel/include/arch.h

index 0fec7d17f78199a89155d638b01e3e5f591db32f..b0c9fcead8169c8e92b3b083cd53c83ed7a61b67 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * This file is part of the Corax operating system.
- * Copyright (C) 2016-2019 Matthias Kruk
+ * Copyright (C) 2016-2020 Matthias Kruk
  *
  * Corax is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -54,13 +54,13 @@ static u32_t _nframes;
 static struct pagedir _kernel_pgdir;
 
 static const char *_str_pg_mode[] = {
-    "legacy IA-32 mode",
-    "PAE mode",
-    "IA-32e mode",
-    "Intel64 mode"
+       "legacy IA-32 mode",
+       "PAE mode",
+       "IA-32e mode",
+       "Intel64 mode"
 };
 
-static int _pg_dir_add_region(pg_dir_t*, void*, u32_t, u32_t, u32_t, u32_t);
+static int _pg_dir_add_region(pg_dir_t*, void*, u32_t, region_type_t, u32_t, region_opts_t);
 int _pg_dir_vpxlate(pg_dir_t*, u32_t, u32_t*);
 int _pg_dir_pagesize(pg_dir_t*, u32_t, u32_t*);
 int _pg_dir_xfer(pg_dir_t*, void*, pg_dir_t*, void*, u32_t);
@@ -68,24 +68,24 @@ static int _pg_dir_heap_map(pg_dir_t*, u32_t);
 
 static void* _phys_alloc(u32_t size, u32_t align)
 {
-    extern u32_t _mem_start;
-    void *addr;
+       extern u32_t _mem_start;
+       void *addr;
 
-    addr = NULL;
+       addr = NULL;
 
-    if(align) {
-        /* check if address needs alignment */
-        if(_mem_start & (align - 1)) {
-            _mem_start = (_mem_start & ~(align - 1)) + align;
-        }
-    }
+       if(align) {
+               /* check if address needs alignment */
+               if(_mem_start & (align - 1)) {
+                       _mem_start = (_mem_start & ~(align - 1)) + align;
+               }
+       }
 
-    addr = (void*)_mem_start;
-    _mem_start += size;
+       addr = (void*)_mem_start;
+       _mem_start += size;
 
-    memset(addr, 0, size);
+       memset(addr, 0, size);
 
-    return(addr);
+       return(addr);
 }
 
 void* pg_frame_alloc_start(void)
@@ -99,8 +99,8 @@ void* pg_frame_alloc_start(void)
 
                        /* find the 0 bit in _frame_map[frm] */
                        for(addr = frm << 17;
-                               addr < ((frm + 1) << 17);
-                               addr += PAGE_SIZE) {
+                           addr < ((frm + 1) << 17);
+                           addr += PAGE_SIZE) {
                                if(!_frame_get(addr)) {
                                        _frame_set(addr);
                                        return((void*)addr);
@@ -121,8 +121,8 @@ void* pg_frame_alloc_end(void)
                        u32_t addr;
 
                        for(addr = frm << 17;
-                               addr > ((frm - 1) << 17);
-                               addr -= PAGE_SIZE) {
+                           addr > ((frm - 1) << 17);
+                           addr -= PAGE_SIZE) {
                                if(!_frame_get(addr)) {
                                        _frame_set(addr);
                                        return((void*)addr);
@@ -152,7 +152,7 @@ static void _pg_dir_debug_regions(pg_dir_t *pd)
                reg = pd->pd_regions[i];
 
                dbg_printf("Region type %02x at 0x%08x:%08x\n",
-                                  reg->reg_type, reg->reg_base, reg->reg_size);
+                          reg->reg_type, reg->reg_base, reg->reg_size);
        }
 
        return;
@@ -226,7 +226,7 @@ static void _pg_dir_debug_arch(pg_dir_t *pd)
                                                }
 
                                                dbg_printf("cr3[%01u][%03u][%03u] = 0x%016llx\n",
-                                                                  i, j, k, ttbl->ppt_entries[k]);
+                                                          i, j, k, ttbl->ppt_entries[k]);
                                        }
                                }
                        }
@@ -243,14 +243,14 @@ static void _pg_dir_debug_arch(pg_dir_t *pd)
 
 void* pg_init(struct multiboot_info *info)
 {
-    struct memory_map *mmap;
-    u64_t mem_size;
+       struct memory_map *mmap;
+       u64_t mem_size;
        u64_t i;
-    u32_t cr3;
+       u32_t cr3;
 
-    _mem_start = (u32_t)&_mem_start + sizeof(u32_t);
-    mmap = (struct memory_map*)info->mmap_addr;
-    mem_size = 0;
+       _mem_start = (u32_t)&_mem_start + sizeof(u32_t);
+       mmap = (struct memory_map*)info->mmap_addr;
+       mem_size = 0;
        cr3 = 0;
 
        /*
@@ -259,8 +259,8 @@ void* pg_init(struct multiboot_info *info)
         */
 
        for(mmap = (struct memory_map*)info->mmap_addr;
-               (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
-               mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
+           (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+           mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
                u64_t limit;
 
                if(mmap->type != MEM_AVAILABLE) {
@@ -279,109 +279,109 @@ void* pg_init(struct multiboot_info *info)
        dbg_printf("Allocating %lluKB for the frame set\n", (mem_size >> 15) / 1024);
 #endif /* FEATURE(DEBUG) */
 
-    /* FIXME: memory size is not good evidence of PAE capability */
-    if(mem_size < 0x100000000L) {
-        /* we have less than 4G of memory, no need for PAE */
-        _pg_flags = PG_MODE_LEGACY;
-    } else {
-        /* TODO: check if IA-32e paging is supported */
-        _pg_flags = PG_MODE_PAE;
-    }
+       /* FIXME: memory size is not good evidence of PAE capability */
+       if(mem_size < 0x100000000L) {
+               /* we have less than 4G of memory, no need for PAE */
+               _pg_flags = PG_MODE_LEGACY;
+       } else {
+               /* TODO: check if IA-32e paging is supported */
+               _pg_flags = PG_MODE_PAE;
+       }
 
-    _nframes = mem_size >> 17; /* 12 for the page size and 5 for the number of frames per u32_t */
-    _frame_map = _phys_alloc(_nframes << 2, 4);
+       _nframes = mem_size >> 17; /* 12 for the page size and 5 for the number of frames per u32_t */
+       _frame_map = _phys_alloc(_nframes << 2, 4);
 
        /* first mark all frames as used - we will later clear those that are actually available */
        for(i = 0; i < mem_size; i += PAGE_SIZE) {
-        _frame_set(i);
-    }
+               _frame_set(i);
+       }
 
        /* allocate the proper page directory type for the kernel */
-    switch(_pg_flags & PG_MODE_MASK) {
+       switch(_pg_flags & PG_MODE_MASK) {
         case PG_MODE_LEGACY:
-            cr3 = (u32_t)_phys_alloc(sizeof(page_table_t), PAGE_ALIGN);
-            break;
+               cr3 = (u32_t)_phys_alloc(sizeof(page_table_t), PAGE_ALIGN);
+               break;
 
         case PG_MODE_PAE:
-            cr3 = (u32_t)_phys_alloc(sizeof(pdpt_t), PDPT_ALIGN);
-            break;
+               cr3 = (u32_t)_phys_alloc(sizeof(pdpt_t), PDPT_ALIGN);
+               break;
 
         case PG_MODE_INTEL64:
         case PG_MODE_IA32E:
-            PANIC("IA-32e mode paging not yet supported\n");
-    }
+               PANIC("IA-32e mode paging not yet supported\n");
+       }
 
        for(mmap = (struct memory_map*)info->mmap_addr;
-               (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
-               mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
-        u32_t attrs;
-        u64_t addr;
+           (void*)mmap < (void*)(info->mmap_addr + info->mmap_length);
+           mmap = (struct memory_map*)((void*)mmap + mmap->size + sizeof(mmap->size))) {
+               u32_t attrs;
+               u64_t addr;
 
 #if FEATURE(DEBUG)
-        dbg_printf("Region: 0x%016llx - 0x%016llx [%u]\n",
-                                  mmap->addr, mmap->addr + mmap->len, mmap->type);
+               dbg_printf("Region: 0x%016llx - 0x%016llx [%u]\n",
+                          mmap->addr, mmap->addr + mmap->len, mmap->type);
 #endif /* FEATURE(DEBUG) */
 
                /* FIXME: Memory in the region 0x100000:&_mem_start should NOT be writable! */
-        attrs = PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
+               attrs = PAGE_ATTR_SIZE | PAGE_ATTR_WRITABLE | PAGE_ATTR_PRESENT;
 
-        /* disable caching on reserved memory areas */
-        if(mmap->type != MEM_AVAILABLE) {
-            attrs |= PAGE_ATTR_WRITE_THRU | PAGE_ATTR_CACHE_DISABLE;
-            /* frames in this region are already marked as used */
-        } else {
+               /* disable caching on reserved memory areas */
+               if(mmap->type != MEM_AVAILABLE) {
+                       attrs |= PAGE_ATTR_WRITE_THRU | PAGE_ATTR_CACHE_DISABLE;
+                       /* frames in this region are already marked as used */
+               } else {
                        /* mark these frames as not-in-use */
 
                        for(addr = mmap->addr; addr < (mmap->addr + mmap->len); addr += PAGE_SIZE) {
-                _frame_clear(addr);
-            }
+                               _frame_clear(addr);
+                       }
                }
 
-        switch(_pg_flags & PG_MODE_MASK) {
-            case PG_MODE_LEGACY: {
-                page_table_t *pd;
+               switch(_pg_flags & PG_MODE_MASK) {
+               case PG_MODE_LEGACY: {
+                       page_table_t *pd;
 
-                pd = (page_table_t*)cr3;
+                       pd = (page_table_t*)cr3;
 
-                for(addr = mmap->addr & ~(PAGE_SIZE_LARGE - 1);
-                                       addr < (mmap->addr + mmap->len);
-                                       addr += PAGE_SIZE_LARGE) {
-                    pd->pt_entries[addr >> 22] = (u32_t)addr | attrs;
-                }
+                       for(addr = mmap->addr & ~(PAGE_SIZE_LARGE - 1);
+                           addr < (mmap->addr + mmap->len);
+                           addr += PAGE_SIZE_LARGE) {
+                               pd->pt_entries[addr >> 22] = (u32_t)addr | attrs;
+                       }
 
-                break;
-            }
+                       break;
+               }
 
-            case PG_MODE_PAE: {
-                pdpt_t *pd;
+               case PG_MODE_PAE: {
+                       pdpt_t *pd;
 
-                pd = (pdpt_t*)cr3;
+                       pd = (pdpt_t*)cr3;
 
-                /* the way PAE works we can access 64G of physical memory, but we can still
-                 * only address 4G at a time, i.e. in each page directory. So stop at 4G. */
-                for(addr = mmap->addr & ~(PAGE_SIZE_BIG - 1);
-                                       addr < (mmap->addr + mmap->len) && addr < 0x100000000LL;
-                                       addr += PAGE_SIZE_BIG) {
-                    pae_page_table_t *pt;
+                       /* the way PAE works we can access 64G of physical memory, but we can still
+                        * only address 4G at a time, i.e. in each page directory. So stop at 4G. */
+                       for(addr = mmap->addr & ~(PAGE_SIZE_BIG - 1);
+                           addr < (mmap->addr + mmap->len) && addr < 0x100000000LL;
+                           addr += PAGE_SIZE_BIG) {
+                               pae_page_table_t *pt;
 
-                    if(!pd->pdpt_entries[(addr >> 30) & 0x3]) {
-                        pt = (pae_page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
-                        pd->pdpt_entries[(addr >> 30) & 0x3] = (u32_t)pt | PAGE_ATTR_PRESENT;
-                    } else {
-                        /* FIXME: I'm pretty sure this will break on Intel64 */
-                        pt = (pae_page_table_t*)((u32_t)pd->pdpt_entries[(addr >> 30) & 0x3] ^ 0x1);
-                    }
+                               if(!pd->pdpt_entries[(addr >> 30) & 0x3]) {
+                                       pt = (pae_page_table_t*)_phys_alloc(PAGE_SIZE, PAGE_ALIGN);
+                                       pd->pdpt_entries[(addr >> 30) & 0x3] = (u32_t)pt | PAGE_ATTR_PRESENT;
+                               } else {
+                                       /* FIXME: I'm pretty sure this will break on Intel64 */
+                                       pt = (pae_page_table_t*)((u32_t)pd->pdpt_entries[(addr >> 30) & 0x3] ^ 0x1);
+                               }
 
-                    pt->ppt_entries[(addr >> 21) & 0x1ff] = addr | attrs;
-                }
-                break;
-            }
+                               pt->ppt_entries[(addr >> 21) & 0x1ff] = addr | attrs;
+                       }
+                       break;
+               }
 
-            case PG_MODE_INTEL64:
-            case PG_MODE_IA32E:
-                PANIC("How did I get here?");
-        }
-    }
+               case PG_MODE_INTEL64:
+               case PG_MODE_IA32E:
+                       PANIC("How did I get here?");
+               }
+       }
 
        /* initialize the heap, since we'll need it now */
        heap_init((void*)_mem_start, CONFIG_KERNEL_HEAP_SIZE);
@@ -389,37 +389,37 @@ void* pg_init(struct multiboot_info *info)
        _kernel_pgdir.pd_base = (void*)cr3;
 
        _pg_dir_add_region(&_kernel_pgdir, TEXT_BASE, TEXT_SIZE,
-                                          REGION_TEXT, 0,
-                                          REGION_KERNEL | REGION_SHARED);
+                          REGION_TYPE_TEXT, 0,
+                          REGION_OPT_KERNEL | REGION_OPT_SHARED);
        _pg_dir_add_region(&_kernel_pgdir, RODATA_BASE, RODATA_SIZE,
-                                          REGION_RODATA, PAGE_ATTR_NO_EXEC,
-                                          REGION_KERNEL | REGION_SHARED);
+                          REGION_TYPE_RODATA, PAGE_ATTR_NO_EXEC,
+                          REGION_OPT_KERNEL | REGION_OPT_SHARED);
        _pg_dir_add_region(&_kernel_pgdir, DATA_BASE, DATA_SIZE,
-                                          REGION_DATA, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
-                                          REGION_KERNEL | REGION_SHARED);
+                          REGION_TYPE_DATA, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
+                          REGION_OPT_KERNEL | REGION_OPT_SHARED);
        _pg_dir_add_region(&_kernel_pgdir, BSS_BASE, BSS_SIZE,
-                                          REGION_BSS, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
-                                          REGION_KERNEL | REGION_SHARED);
+                          REGION_TYPE_BSS, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
+                          REGION_OPT_KERNEL | REGION_OPT_SHARED);
        /* heap region also includes allocations from _phys_alloc() */
        _pg_dir_add_region(&_kernel_pgdir, &_mem_start,
-                                          _mem_start - (u32_t)&_mem_start + CONFIG_KERNEL_HEAP_SIZE,
-                                          REGION_HEAP, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
-                                          REGION_KERNEL | REGION_SHARED);
+                          _mem_start - (u32_t)&_mem_start + CONFIG_KERNEL_HEAP_SIZE,
+                          REGION_TYPE_HEAP, PAGE_ATTR_WRITABLE | PAGE_ATTR_NO_EXEC,
+                          REGION_OPT_KERNEL | REGION_OPT_SHARED);
 
-    /* mark all page frames from 0x0 to the end of the kernel heap as used */
-    for(i = 0; i < _mem_start + CONFIG_KERNEL_HEAP_SIZE; i += PAGE_SIZE) {
-        _frame_set(i);
-    }
+       /* mark all page frames from 0x0 to the end of the kernel heap as used */
+       for(i = 0; i < _mem_start + CONFIG_KERNEL_HEAP_SIZE; i += PAGE_SIZE) {
+               _frame_set(i);
+       }
 
 #if FEATURE(DEBUG)
        _pg_dir_debug_regions(&_kernel_pgdir);
 #endif /* FEATURE(DEBUG) */
 
-    _kernel_cr3 = cr3;
+       _kernel_cr3 = cr3;
 
-    dbg_printf("Enabling %s paging\n", _str_pg_mode[_pg_flags & PG_MODE_MASK]);
+       dbg_printf("Enabling %s paging\n", _str_pg_mode[_pg_flags & PG_MODE_MASK]);
 
-    return((void*)(cr3 | _pg_flags));
+       return((void*)(cr3 | _pg_flags));
 }
 
 void* pg_dir_get_kstack(struct pagedir *pgdir)
@@ -431,7 +431,7 @@ void* pg_dir_get_kstack(struct pagedir *pgdir)
        for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
                reg = pgdir->pd_regions[i];
 
-               if(reg->reg_type == REGION_KSTACK) {
+               if(reg->reg_type == REGION_TYPE_KSTACK) {
                        u32_t virt;
                        u32_t phys;
 
@@ -460,7 +460,7 @@ void* pg_dir_get_ustack(struct pagedir *pgdir)
        for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
                reg = pgdir->pd_regions[i];
 
-               if(reg->reg_type == REGION_STACK) {
+               if(reg->reg_type == REGION_TYPE_STACK) {
                        ret_val = reg->reg_base;
                        break;
                }
@@ -478,7 +478,7 @@ void* pg_dir_get_ustack_top(struct pagedir *pgdir)
        for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
                reg = pgdir->pd_regions[i];
 
-               if(reg->reg_type == REGION_STACK) {
+               if(reg->reg_type == REGION_TYPE_STACK) {
                        ret_val = (char*)reg->reg_base + reg->reg_size;
                        break;
                }
@@ -497,7 +497,7 @@ int pg_dir_grow_ustack(struct pagedir *pgdir, void *new_bottom)
        int i;
 
        for(stack = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
-               if(pgdir->pd_regions[i]->reg_type == REGION_STACK) {
+               if(pgdir->pd_regions[i]->reg_type == REGION_TYPE_STACK) {
                        stack = pgdir->pd_regions[i];
                        break;
                }
@@ -520,7 +520,7 @@ int pg_dir_grow_ustack(struct pagedir *pgdir, void *new_bottom)
                        (unsigned long)new_aligned;
 
                ret_val = pg_dir_map(pgdir, 0, new_aligned, map_size,
-                                stack->reg_attrs);
+                                    stack->reg_attrs);
 
                if(!ret_val) {
                        stack->reg_base = new_aligned;
@@ -541,7 +541,7 @@ gtfo:
 }
 
 int pg_dir_memcpy(struct pagedir *ddir, void *dvaddr,
-                                 struct pagedir *sdir, void *svaddr, u32_t bytes)
+                 struct pagedir *sdir, void *svaddr, u32_t bytes)
 {
        int ret_val;
 
@@ -638,7 +638,7 @@ int pg_dir_memset(struct pagedir *ddir, void *dvaddr,
 }
 
 int _pg_dir_xfer(struct pagedir *ddir, void *dvaddr,
-                                struct pagedir *sdir, void *svaddr, u32_t bytes)
+                struct pagedir *sdir, void *svaddr, u32_t bytes)
 {
        void *dpaddr;
        void *spaddr;
@@ -844,8 +844,8 @@ int _pg_dir_kstack_map(struct pagedir *pgdir)
                        goto cleanup;
                }
 
-               ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_KSTACK, attrs,
-                                                                        REGION_PRIV);
+               ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_TYPE_KSTACK, attrs,
+                                            REGION_OPT_PRIV);
 
                if(ret_val < 0) {
                        dbg_printf("_pg_dir_add_region() failed with error %u\n", -ret_val);
@@ -888,8 +888,8 @@ int _pg_dir_ustack_map(struct pagedir *pgdir)
                        goto cleanup;
                }
 
-               ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_STACK, attrs,
-                                                                        REGION_PRIV);
+               ret_val = _pg_dir_add_region(pgdir, vaddr, PAGE_SIZE, REGION_TYPE_STACK, attrs,
+                                            REGION_OPT_PRIV);
 
                if(ret_val < 0) {
                        goto cleanup;
@@ -901,7 +901,7 @@ int _pg_dir_ustack_map(struct pagedir *pgdir)
 cleanup:
        if(ret_val < 0) {
                pg_dir_unmap(pgdir, (void*)((u32_t)CONFIG_KERNEL_STACK_BASE - PAGE_SIZE),
-                                        PAGE_SIZE);
+                            PAGE_SIZE);
 
                if(frame) {
                        pg_frame_free(frame);
@@ -923,29 +923,29 @@ static void* _pg_dir_heap_start(pg_dir_t *pd)
                reg = pd->pd_regions[i];
 
                switch(reg->reg_type) {
-               case REGION_HEAP:
+               case REGION_TYPE_HEAP:
                        ret_val = (u32_t)reg->reg_base;
                        i = CONFIG_PAGING_DIR_MAXREGIONS;
 
                        break;
 
-               case REGION_MMAP:
-               case REGION_TEXT:
-               case REGION_BSS:
-               case REGION_DATA:
-               case REGION_RODATA:
+               case REGION_TYPE_MMAP:
+               case REGION_TYPE_TEXT:
+               case REGION_TYPE_BSS:
+               case REGION_TYPE_DATA:
+               case REGION_TYPE_RODATA:
                        limit = ALIGN((u32_t)reg->reg_base + reg->reg_size,
-                                                 reg->reg_pgsize);
+                                     reg->reg_pgsize);
 
                        if(limit > ret_val) {
-                           ret_val = limit;
+                               ret_val = limit;
                        }
 
                        break;
 
                default:
-               case REGION_STACK:
-               case REGION_KSTACK:
+               case REGION_TYPE_STACK:
+               case REGION_TYPE_KSTACK:
                        break;
                }
        }
@@ -980,38 +980,38 @@ static int _pg_dir_heap_map(pg_dir_t *pd, u32_t heap_size)
        /* FIXME: Lock the page directory */
 
        for(i = 0, heap_start = 0, ret_val = 0;
-               i < CONFIG_PAGING_DIR_MAXREGIONS && ret_val > 0;
-               i++) {
+           i < CONFIG_PAGING_DIR_MAXREGIONS && ret_val > 0;
+           i++) {
                struct region *reg;
                u32_t limit;
 
                reg = pd->pd_regions[i];
 
                switch(reg->reg_type) {
-               case REGION_HEAP:
+               case REGION_TYPE_HEAP:
                        /* the page directory apparently already contains a heap */
                        ret_val = -EALREADY;
                        break;
 
-               case REGION_STACK:
-               case REGION_KSTACK:
+               case REGION_TYPE_STACK:
+               case REGION_TYPE_KSTACK:
                        /*
                         * Ignore stack segments in the calculation because they are supposed to
                         * be at the end of the address space, and not in front of the heap.
                         */
                        break;
 
-               case REGION_MMAP:
-               case REGION_TEXT:
-               case REGION_BSS:
-               case REGION_DATA:
-               case REGION_RODATA:
+               case REGION_TYPE_MMAP:
+               case REGION_TYPE_TEXT:
+               case REGION_TYPE_BSS:
+               case REGION_TYPE_DATA:
+               case REGION_TYPE_RODATA:
                        /*
                         * Calculate the address of the first byte after this region. It *should*
                         * already be aligned, but nevertheless align it again. 念の為.
                         */
                        limit = ALIGN((u32_t)reg->reg_base + reg->reg_size,
-                                                 reg->reg_pgsize);
+                                     reg->reg_pgsize);
 
                        if(limit > heap_start) {
                                heap_start = limit;
@@ -1037,12 +1037,12 @@ static int _pg_dir_heap_map(pg_dir_t *pd, u32_t heap_size)
 
                        /* allocate page frames from wherever */
                        ret_val = pg_dir_map(pd, NULL, (void*)heap_start,
-                                                                heap_size, attrs);
+                                            heap_size, attrs);
 
                        if(!ret_val) {
                                /* add a struct region so the memory is accounted for */
                                ret_val = _pg_dir_add_region(pd, (void*)heap_start, heap_size,
-                                                                                        REGION_HEAP, attrs, REGION_PRIV);
+                                                            REGION_TYPE_HEAP, attrs, REGION_OPT_PRIV);
 
                                if(ret_val < 0) {
                                        /* oops - ran out of kernel heap space */
@@ -1071,10 +1071,10 @@ int _clone_kernel_region(pg_dir_t *kdir, region_t *reg, void *data)
        dir = (pg_dir_t*)data;
 
        switch(reg->reg_type) {
-       case REGION_TEXT:
-       case REGION_BSS:
-       case REGION_DATA:
-       case REGION_RODATA:
+       case REGION_TYPE_TEXT:
+       case REGION_TYPE_BSS:
+       case REGION_TYPE_DATA:
+       case REGION_TYPE_RODATA:
                ret_val = pg_dir_map_region(dir, kdir, reg);
                break;
 
@@ -1084,18 +1084,18 @@ int _clone_kernel_region(pg_dir_t *kdir, region_t *reg, void *data)
                break;
 
 #if 0
-       case REGION_DATA:
+       case REGION_TYPE_DATA:
                ret_val = pg_dir_clone_region(dir, kdir, reg);
                break;
 
-       case REGION_TEXT:
-       case REGION_RODATA:
-       case REGION_HEAP:
+       case REGION_TYPE_TEXT:
+       case REGION_TYPE_RODATA:
+       case REGION_TYPE_HEAP:
                /*
                 * The kernel's .text and .rodata region are mapped directly into the page
                 * directory since they cannot be modified or removed anyways.
                 */
-       case REGION_BSS:
+       case REGION_TYPE_BSS:
                /*
                 * The way the interrupt handling is currently implemented, accesses to the
                 * _cpu structure and _kernel_cr3 are made within the context (i.e. using
@@ -1118,21 +1118,21 @@ int _clone_kernel_region(pg_dir_t *kdir, region_t *reg, void *data)
                 */
 #if FEATURE(DEBUG)
                dbg_printf("Mapping region %02x at 0x%08x:%08x (ATTR=%x)\n",
-                                  reg->reg_type, reg->reg_base, reg->reg_size, attrs);
+                          reg->reg_type, reg->reg_base, reg->reg_size, attrs);
 #endif /* FEATURE(DEBUG) */
 
                ret_val = pg_dir_map_region(dir, kdir, reg);
 
                break;
 #if 0
-       case REGION_BSS:
+       case REGION_TYPE_BSS:
                /*
                 * The .bss section contains the _kernel_cr3 symbol, which is necessary
                 * for the interrupt handling code to be able to switch to the kernel
                 * page directory. Alternatively, interrupt handlers could also turn
                 * off paging to access _kernel_cr3, though...
                 */
-       case REGION_DATA:
+       case REGION_TYPE_DATA:
                /*
                 * FIXME: Duplicate the parent's .bss and .data, not the kernel's
                 *
@@ -1150,12 +1150,12 @@ int _clone_kernel_region(pg_dir_t *kdir, region_t *reg, void *data)
 
                if(ret_val >= 0) {
                        ret_val = _pg_dir_add_region(dir, reg->reg_base, reg->reg_size,
-                                                                                reg->reg_type, attrs, reg->reg_flags);
+                                                    reg->reg_type, attrs, reg->reg_opts);
 
                        if(ret_val >= 0) {
                                /* copy the contents of the pages */
                                pg_dir_memcpy(dir, reg->reg_base, &_kernel_pgdir, reg->reg_base,
-                                                         reg->reg_size);
+                                             reg->reg_size);
                        }
                }
 
@@ -1210,7 +1210,7 @@ int pg_dir_create(pg_dir_t **dst)
 #if 0
                /* map the vesa memory into the pagedir */
                pg_dir_map(dir, (void*)0xb8000, (void*)0xb8000, 0x2000,
-                                  PAGE_ATTR_PRESENT | PAGE_ATTR_WRITABLE);
+                          PAGE_ATTR_PRESENT | PAGE_ATTR_WRITABLE);
 #endif /* 0 */
 
                /* allocate the kernel stack */
@@ -1238,7 +1238,7 @@ int pg_dir_create(pg_dir_t **dst)
 }
 
 static int _pg_dir_map_legacy(page_table_t *pd, u32_t paddr, u32_t vaddr,
-                                                         u32_t size, const u32_t flags)
+                             u32_t size, const u32_t flags)
 {
        int ret_val;
 
@@ -1326,7 +1326,7 @@ static int _pg_dir_map_legacy(page_table_t *pd, u32_t paddr, u32_t vaddr,
 }
 
 static int _pg_dir_map_pae(pdpt_t *pd, u64_t paddr, u64_t vaddr,
-                                                  u32_t size, const u32_t flags)
+                          u32_t size, const u32_t flags)
 {
        int ret_val;
 
@@ -1380,7 +1380,7 @@ static int _pg_dir_map_pae(pdpt_t *pd, u64_t paddr, u64_t vaddr,
 }
 
 int pg_dir_map(pg_dir_t *pd, const void *phys, const void *virt,
-                          const u32_t size, const u32_t flags)
+              const u32_t size, const u32_t flags)
 {
        int ret_val;
        u32_t asize;
@@ -1394,14 +1394,14 @@ int pg_dir_map(pg_dir_t *pd, const void *phys, const void *virt,
        switch(_pg_flags & PG_MODE_MASK) {
        case PG_MODE_LEGACY:
                ret_val = _pg_dir_map_legacy((struct page_table*)pd->pd_base,
-                                                                        (u32_t)phys, (u32_t)virt,
-                                                                        asize, flags);
+                                            (u32_t)phys, (u32_t)virt,
+                                            asize, flags);
                break;
 
        case PG_MODE_PAE:
                ret_val = _pg_dir_map_pae((struct pdpt*)pd->pd_base,
-                                                                 (unsigned)phys, (unsigned)virt,
-                                                                 asize, flags);
+                                         (unsigned)phys, (unsigned)virt,
+                                         asize, flags);
                break;
 
        case PG_MODE_IA32E:
@@ -1440,7 +1440,7 @@ int pg_dir_map_region(pg_dir_t *dpd, pg_dir_t *spd, region_t *reg)
                goto gtfo;
        }
 
-       if(reg->reg_flags & REGION_KERNEL) {
+       if(reg->reg_opts & REGION_OPT_KERNEL) {
                /*
                 * Special case: kernel regions
                 *
@@ -1452,15 +1452,15 @@ int pg_dir_map_region(pg_dir_t *dpd, pg_dir_t *spd, region_t *reg)
                 * user-space.
                 */
                ret_val = pg_dir_map(dpd, reg->reg_base, reg->reg_base, reg->reg_size,
-                                                        (reg->reg_attrs | PAGE_ATTR_USER) & ~PAGE_ATTR_WRITABLE);
+                                    (reg->reg_attrs | PAGE_ATTR_USER) & ~PAGE_ATTR_WRITABLE);
 
                if(ret_val < 0) {
                        goto gtfo;
                }
        } else {
                for(vaddr = (u32_t)reg->reg_base;
-                       vaddr < ((u32_t)reg->reg_base + reg->reg_size);
-                       vaddr += reg->reg_pgsize) {
+                   vaddr < ((u32_t)reg->reg_base + reg->reg_size);
+                   vaddr += reg->reg_pgsize) {
                        u32_t paddr;
 
                        /* since the pages may not be continguous in memory, map each page separately */
@@ -1471,7 +1471,7 @@ int pg_dir_map_region(pg_dir_t *dpd, pg_dir_t *spd, region_t *reg)
                        }
 
                        ret_val = pg_dir_map(dpd, (void*)paddr, (void*)vaddr,
-                                                                reg->reg_size, reg->reg_attrs);
+                                            reg->reg_size, reg->reg_attrs);
 
                        if(ret_val < 0) {
                                break;
@@ -1484,7 +1484,7 @@ int pg_dir_map_region(pg_dir_t *dpd, pg_dir_t *spd, region_t *reg)
                pg_dir_unmap(dpd, reg->reg_base, reg->reg_size);
        } else {
                /* mark region as shared and increase its refcount */
-               reg->reg_flags |= REGION_SHARED;
+               reg->reg_opts |= REGION_OPT_SHARED;
                reg->reg_refs++;
                dpd->pd_regions[idx] = reg;
        }
@@ -1514,7 +1514,7 @@ int pg_dir_clone_region(pg_dir_t *dpd, pg_dir_t *spd, region_t *reg)
 
        /* attempt to map the region */
        ret_val = pg_dir_map(dpd, NULL, reg->reg_base,
-                                                reg->reg_size, reg->reg_attrs | PAGE_ATTR_USER);
+                            reg->reg_size, reg->reg_attrs | PAGE_ATTR_USER);
 
        if(ret_val < 0) {
                goto gtfo;
@@ -1522,13 +1522,14 @@ int pg_dir_clone_region(pg_dir_t *dpd, pg_dir_t *spd, region_t *reg)
 
        /* copy the contents of the region */
        ret_val = pg_dir_memcpy(dpd, reg->reg_base,
-                                                       spd, reg->reg_base,
-                                                       reg->reg_size);
+                               spd, reg->reg_base,
+                               reg->reg_size);
 
        if(ret_val >= 0) {
                /* also keep track of the region */
                ret_val = _pg_dir_add_region(dpd, reg->reg_base, reg->reg_size,
-                                                                        reg->reg_type, reg->reg_attrs, reg->reg_flags | PAGE_ATTR_USER);
+                                            reg->reg_type, reg->reg_attrs | PAGE_ATTR_USER,
+                                            reg->reg_opts);
        }
 
        if(ret_val < 0) {
@@ -1573,7 +1574,7 @@ int pg_dir_foreach_region(pg_dir_t *pd, int (*func)(pg_dir_t*, region_t*, void*)
 }
 
 static int _pg_dir_add_region(pg_dir_t *pd, void *base, u32_t size,
-                             u32_t type, u32_t attrs, u32_t flags)
+                             region_type_t type, u32_t attrs, region_opts_t opts)
 {
        struct region *reg;
        int ret_val;
@@ -1606,7 +1607,7 @@ static int _pg_dir_add_region(pg_dir_t *pd, void *base, u32_t size,
        reg->reg_pgsize = PAGE_SIZE;
        reg->reg_type = type;
        reg->reg_attrs = attrs;
-       reg->reg_flags = flags;
+       reg->reg_opts = opts;
        reg->reg_refs = 1;
 
        pd->pd_regions[i] = reg;
@@ -1627,7 +1628,7 @@ void* pg_dir_get_heap(pg_dir_t *pd)
        int i;
 
        for(ret_val = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
-               if(pd->pd_regions[i]->reg_type == REGION_HEAP) {
+               if(pd->pd_regions[i]->reg_type == REGION_TYPE_HEAP) {
                        ret_val = pd->pd_regions[i]->reg_base;
                        break;
                }
@@ -1645,7 +1646,7 @@ void* pg_dir_sbrk(pg_dir_t *pd, i32_t increment)
        /* special case where the heap is not touched */
        if(!increment) {
                for(i = 0, ret_val = NULL; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
-                       if(pd->pd_regions[i]->reg_type == REGION_HEAP) {
+                       if(pd->pd_regions[i]->reg_type == REGION_TYPE_HEAP) {
                                ret_val = pd->pd_regions[i]->reg_base +
                                        pd->pd_regions[i]->reg_size;
                                break;
@@ -1662,7 +1663,7 @@ void* pg_dir_sbrk(pg_dir_t *pd, i32_t increment)
        }
 
        for(heap = NULL, i = 0; i < CONFIG_PAGING_DIR_MAXREGIONS; i++) {
-               if(pd->pd_regions[i]->reg_type == REGION_HEAP) {
+               if(pd->pd_regions[i]->reg_type == REGION_TYPE_HEAP) {
                        heap = pd->pd_regions[i];
                        break;
                }
@@ -1764,7 +1765,7 @@ int pg_dir_mmap(pg_dir_t *pgdir, void *addr, u32_t size,
 
        if(pgdir && dst) {
                u32_t attrs;
-               u32_t rflags;
+               region_opts_t opts;
                void* vaddr;
                void* paddr;
 
@@ -1788,7 +1789,7 @@ int pg_dir_mmap(pg_dir_t *pgdir, void *addr, u32_t size,
                        attrs |= PAGE_ATTR_USER;
                }
 
-               rflags = REGION_PRIV;
+               opts = REGION_OPT_PRIV;
 
                if(flags & MAP_PHYS) {
                        /*
@@ -1798,7 +1799,7 @@ int pg_dir_mmap(pg_dir_t *pgdir, void *addr, u32_t size,
 
                        vaddr = addr;
                        paddr = addr;
-                       rflags = REGION_SHARED;
+                       opts = REGION_OPT_SHARED;
                }
 
                /*
@@ -1810,8 +1811,8 @@ int pg_dir_mmap(pg_dir_t *pgdir, void *addr, u32_t size,
                if(!ret_val) {
                        /* also account for the new region */
                        ret_val = _pg_dir_add_region(pgdir, addr, size,
-                                                    REGION_MMAP, 0,
-                                                    rflags);
+                                                    REGION_TYPE_MMAP, 0,
+                                                    opts);
 
                        /* or unmap if it can't be accounted for */
                        if(ret_val < 0) {
index bbe18b968799035f0ba53d0a08b556c0d2c5b165..95c2c4f7730c15274cbbdb3c6488b917e8c5a7e3 100644 (file)
@@ -194,28 +194,30 @@ int _fork_region(pg_dir_t *parent_dir, region_t *reg, void *data)
        ret_val = 0;
        child_dir = (pg_dir_t*)data;
 
-       if(reg->reg_flags & (REGION_PRIV | REGION_KERNEL)) {
+       if(reg->reg_opts & (REGION_OPT_PRIV | REGION_OPT_KERNEL)) {
                /* skip private and kernel regions */
                goto skip;
        }
 
        switch(reg->reg_type) {
-       case REGION_TEXT:
-       case REGION_RODATA:
+       case REGION_TYPE_TEXT:
+       case REGION_TYPE_RODATA:
                /* .text and .rodata are read-only, so we might as well just map them */
                ret_val = pg_dir_map_region(child_dir, parent_dir, reg);
                break;
 
-       case REGION_BSS:
-       case REGION_DATA:
+       case REGION_TYPE_BSS:
+       case REGION_TYPE_DATA:
                /* copy the region and its contents */
                ret_val = pg_dir_clone_region(child_dir, parent_dir, reg);
                break;
 
-       case REGION_KSTACK:
-       case REGION_STACK:
-       case REGION_HEAP:
+       case REGION_TYPE_KSTACK:
+       case REGION_TYPE_STACK:
+       case REGION_TYPE_HEAP:
                /* don't clone KSTACK/STACK/HEAP regions */
+       case REGION_TYPE_MMAP:
+               /* FIXME: what about mmap'ed regions? */
        default:
                break;
        }
@@ -232,24 +234,26 @@ int _vfork_region(pg_dir_t *parent_dir, region_t *reg, void *data)
        ret_val = 0;
        child_dir = (pg_dir_t*)data;
 
-       if(reg->reg_flags & (REGION_PRIV | REGION_KERNEL)) {
+       if(reg->reg_opts & (REGION_OPT_PRIV | REGION_OPT_KERNEL)) {
                /* skip private and kernel regions */
                goto skip;
        }
 
        switch(reg->reg_type) {
-       case REGION_TEXT:
-       case REGION_BSS:
-       case REGION_DATA:
-       case REGION_RODATA:
+       case REGION_TYPE_TEXT:
+       case REGION_TYPE_BSS:
+       case REGION_TYPE_DATA:
+       case REGION_TYPE_RODATA:
                /* directly map the region into the new page directory */
                ret_val = pg_dir_map_region(child_dir, parent_dir, reg);
                break;
 
-       case REGION_KSTACK:
-       case REGION_STACK:
-       case REGION_HEAP:
+       case REGION_TYPE_KSTACK:
+       case REGION_TYPE_STACK:
+       case REGION_TYPE_HEAP:
                /* don't map these */
+       case REGION_TYPE_MMAP:
+               /* FIXME: what about mmap'ed regions? */
        default:
                break;
        }
index cfa3a20f60e1e0cdcf8cbb821db8d542faa96202..b0a97163aaf9e1fb1abaa4309163f9dd4e4e20e7 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * This file is part of the Corax operating system.
- * Copyright (C) 2016-2019 Matthias Kruk
+ * Copyright (C) 2016-2020 Matthias Kruk
  *
  * Corax is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -117,28 +117,32 @@ struct task {
        pid_t t_pid;
 } __attribute__((packed));
 
-#define REGION_TEXT   0
-#define REGION_BSS    1
-#define REGION_DATA   2
-#define REGION_RODATA 3
-#define REGION_KSTACK 4
-#define REGION_STACK  5
-#define REGION_HEAP   6
-#define REGION_MMAP   7
-
-#define REGION_SHARED (1 << 0)
-#define REGION_KERNEL (1 << 1)
-#define REGION_PRIV   (1 << 2)
+typedef enum {
+       REGION_TYPE_TEXT = 0,
+       REGION_TYPE_BSS,
+       REGION_TYPE_DATA,
+       REGION_TYPE_RODATA,
+       REGION_TYPE_KSTACK,
+       REGION_TYPE_STACK,
+       REGION_TYPE_HEAP,
+       REGION_TYPE_MMAP
+} region_type_t;
+
+typedef enum {
+       REGION_OPT_SHARED = (1 << 0),
+       REGION_OPT_KERNEL = (1 << 1),
+       REGION_OPT_PRIV   = (1 << 2)
+} region_opts_t;
 
 typedef struct region region_t;
 
 struct region {
        void *reg_base;
-       u32_t reg_type;
+       region_type_t reg_type;
        u32_t reg_size;
        u32_t reg_pgsize;
-       u32_t reg_attrs;
-       u32_t reg_flags;
+       u32_t reg_attrs; /* page attributes */
+       region_opts_t reg_opts;
        u32_t reg_refs;
 };