git.corax.cc Git - corax/commitdiff
Add "lazy" scheduler implementation
authorMatthias Kruk <m@m10k.eu>
Mon, 3 Feb 2020 22:34:13 +0000 (07:34 +0900)
committerMatthias Kruk <m@m10k.eu>
Mon, 3 Feb 2020 22:34:13 +0000 (07:34 +0900)
kernel/Makefile
kernel/include/sched.h
kernel/sched/Makefile [new file with mode: 0644]
kernel/sched/common.c [new file with mode: 0644]
kernel/sched/lazy.c [new file with mode: 0644]

index 3367319591c54b6759809d4d429b49f4f5d8ac1b..642a6ce62691246595108a45e7d0ad3474317af5 100644 (file)
@@ -1,7 +1,8 @@
-OBJECTS = arch/arch.o core/core.o io/io.o vga/vga.o kbd/kbd.o klibc/klibc.a
-DEPS = arch core io vga kbd klibc
+OBJECTS = arch/arch.o core/core.o vga/vga.o kbd/kbd.o klibc/klibc.a sched/sched.o
+DEPS = arch core vga kbd klibc sched
 LDFLAGS = --oformat=elf32-i386 -b elf32-i386 -m elf_i386
 CFLAGS = -m32 -Wall -nostdlib -nodefaultlibs -nostartfiles -ffreestanding
+LIBGCC = /usr/lib/gcc-cross/i686-linux-gnu/8/libgcc.a
 PHONY = $(DEPS) clean
 OUTPUT = corax
 
@@ -15,7 +16,7 @@ $(DEPS):
 
 # FIXME: Remove dependency on libgcc
 $(OUTPUT): $(DEPS)
-       ld -o $@ -T linker.ld $(LDFLAGS) $(OBJECTS) /usr/lib/gcc/i686-linux-gnu/8/libgcc.a
+       ld -o $@ -T linker.ld $(LDFLAGS) $(OBJECTS) $(LIBGCC)
 
 clean: $(DEPS)
        rm -f $(OUTPUT)
index 7e059a83c9fb1cad49560110472737885cd59882..2caf701133ae19c494380d348811b58812493e05 100644 (file)
@@ -2,6 +2,7 @@
 #define __CORE_SCHED_H
 
 #include <config.h>
+#include <arch.h>
 
 struct task;
 
@@ -12,7 +13,7 @@ typedef enum {
 } sched_task_state_t;
 
 struct sched {
-       int (*tick)(struct sched*);
+       void (*tick)(struct sched*);
 
        int (*sleep)(struct sched*, struct task*);
        int (*wakeup)(struct sched*, struct task*, int);
@@ -22,7 +23,22 @@ struct sched {
        int (*nice)(struct sched*, struct task*, int);
 };
 
-void sched_tick(void);
+#ifdef __SCHED_COMMON
+#define GLOBAL
+#else
+#define GLOBAL extern
+#endif
+
+GLOBAL struct sched *_sched[CONFIG_SMP_CPUS];
+
+#define sched_tick()      (_sched[CPU_ID])->tick(_sched[CPU_ID])
+#define sched_sleep(t)    (_sched[CPU_ID])->sleep(_sched[CPU_ID], (t))
+#define sched_wakeup(t,f) (_sched[CPU_ID])->wakeup(_sched[CPU_ID], (t), (f))
+#define sched_schedule(t) (_sched[CPU_ID])->schedule(_sched[CPU_ID], (t))
+#define sched_drop(t)     (_sched[CPU_ID])->drop(_sched[CPU_ID], (t))
+#define sched_nice(t,n)   (_sched[CPU_ID])->nice(_sched[CPU_ID], (t), (n))
+
+/* void sched_tick(void); */
 void sched_wait(pid_t);
 int  sched_raise(pid_t);
 int  sched_enqueue(task_t*);
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
new file mode 100644 (file)
index 0000000..3b50359
--- /dev/null
@@ -0,0 +1,16 @@
+OBJECTS = common.o lazy.o
+OUTPUT = sched.o
+INCLUDES = -I../include -I../../include -I../..
+CFLAGS += $(INCLUDES)
+ASFLAGS = $(CFLAGS)
+PHONY = clean
+
+all: $(OUTPUT)
+
+$(OUTPUT): $(OBJECTS)
+       ld -r $(LDFLAGS) -o $@ $^
+
+clean:
+       rm -f $(OBJECTS) $(OUTPUT)
+
+.PHONY: $(PHONY)
diff --git a/kernel/sched/common.c b/kernel/sched/common.c
new file mode 100644 (file)
index 0000000..0db6396
--- /dev/null
@@ -0,0 +1,24 @@
+#include <config.h>
+#include <sched.h>
+
+extern int sched_lazy_init(struct sched**);
+
+int sched_init(void)
+{
+       int ret_val = 0; /* avoid returning an uninitialized value if the loop body never runs */
+       int i;
+
+       for(i = 0; i < CONFIG_SMP_CPUS; i++) {
+               ret_val = sched_lazy_init(&_sched[i]);
+
+               if(ret_val < 0) {
+                       break;
+               }
+       }
+
+       if(ret_val < 0) {
+               /* Failed to initialize scheduler for CPU */
+       }
+
+       return(ret_val);
+}
diff --git a/kernel/sched/lazy.c b/kernel/sched/lazy.c
new file mode 100644 (file)
index 0000000..06a672c
--- /dev/null
@@ -0,0 +1,242 @@
+#include <config.h>
+#include <corax/errno.h>
+#include <arch.h>
+#include <sched.h>
+#include <kq.h>
+
+struct sched_lazy {
+       struct sched parent;
+
+       spinlock_t lock;
+
+       /* structures for task accounting */
+       kq_t *readyq;
+       kq_t *waitq;
+};
+
+#define _LOCK(_l)   spinlock_lock(&((_l)->lock))
+#define _UNLOCK(_l) spinlock_unlock(&((_l)->lock))
+
+static int _lazy_tick(struct sched_lazy *lazy)
+{
+       struct task *ctask;
+       int ret_val;
+
+       ctask = task_get_current();
+       ret_val = 0;
+
+       task_lock(ctask);
+
+       if(ctask->t_state != TASK_STATE_EXIT) {
+               if(ctask->t_rslice) {
+                       ctask->t_rslice--;
+               } else {
+                       /* task has exhausted its time slice - reschedule */
+                       ctask->t_rslice = ctask->t_tslice;
+                       ctask->t_state = TASK_STATE_READY;
+                       ret_val = 1;
+               }
+       }
+
+       task_unlock(ctask);
+
+       if(ret_val) {
+               /* rescheduling is required */
+               _LOCK(lazy);
+
+               /* put current task back on the ready queue */
+               ret_val = kq_nq(&(lazy->readyq), ctask);
+
+               if(ret_val < 0) {
+                       /*
+                        * FIXME: could not put task back on ready queue - wat do?
+                        *
+                        * If we don't do anything here, the task would be silently
+                        * dropped by the scheduler and never get scheduled again.
+                        * Instead, it would be better if we could move it to a different
+                        * processor, which (on NUMA systems) might have enough memory
+                        * available to schedule the task.
+                        */
+               }
+
+               /* get the first ready task */
+               ctask = (struct task*)kq_dq(&(lazy->readyq));
+
+               _UNLOCK(lazy);
+
+               if(ctask) {
+                       task_switch(ctask);
+               }
+
+               /* ctask may not point to the correct task anymore */
+               ctask = task_get_current();
+
+               task_lock(ctask);
+
+               if(ctask->t_state == TASK_STATE_SIGNAL) {
+                       ret_val = -EINTR;
+               } else {
+                       ret_val = 0;
+               }
+
+               ctask->t_state = TASK_STATE_RUNNING;
+               task_unlock(ctask);
+       }
+
+       return(ret_val);
+}
+
+static int _lazy_sleep(struct sched_lazy *lazy, struct task *task)
+{
+       struct task *ctask;
+       int ret_val;
+
+       ret_val = -EALREADY;
+       ctask = task_get_current();
+
+       if(!task) {
+               task = ctask;
+       }
+
+       task_lock(task);
+
+       if(task->t_state != TASK_STATE_WAITING) {
+               task->t_state = TASK_STATE_WAITING;
+               ret_val = 0;
+       }
+
+       task_unlock(task);
+
+       /* the task is already on the wait queue if it was waiting above */
+       if(!ret_val) {
+               _LOCK(lazy);
+
+               ret_val = kq_rm(&(lazy->readyq), task);
+
+               if(!ret_val) {
+                       kq_nq(&(lazy->waitq), task);
+               }
+
+               _UNLOCK(lazy);
+       }
+
+       /* switch to another task if we're putting the current one to sleep */
+
+       if(task == ctask) {
+               ret_val = _lazy_tick(lazy); /* tick hook returns void; call the impl directly to keep -EINTR */
+       }
+
+       return(ret_val);
+}
+
+static int _lazy_wakeup(struct sched_lazy *lazy, struct task *task, int flags)
+{
+       int ret_val;
+
+       ret_val = -EALREADY;
+
+       task_lock(task);
+
+       /* don't resume tasks that aren't waiting to be resumed */
+       if(task->t_state == TASK_STATE_WAITING) {
+               task->t_state = flags ? TASK_STATE_SIGNAL : TASK_STATE_READY;
+               ret_val = 0;
+       } else {
+               ret_val = -EBUSY;
+       }
+
+       task_unlock(task);
+
+       /* if the task was waiting, move it to the ready queue */
+       if(!ret_val) {
+               _LOCK(lazy);
+
+               ret_val = kq_rm(&(lazy->waitq), task);
+
+               /* don't enqueue the task if it wasn't on the wait queue */
+               if(!ret_val) {
+                       kq_nq(&(lazy->readyq), task);
+               }
+
+               _UNLOCK(lazy);
+       }
+
+       return(ret_val);
+}
+
+static int _lazy_schedule(struct sched_lazy *lazy, struct task *task)
+{
+       int ret_val;
+
+       ret_val = -EINVAL;
+
+       task_lock(task);
+
+       if(task->t_state == TASK_STATE_NEW ||
+          task->t_state == TASK_STATE_FORKED) {
+               ret_val = 0;
+       }
+
+       task_unlock(task);
+
+       /* only allow TASK_STATE_NEW and TASK_STATE_FORKED tasks to be enqueued */
+       if(!ret_val) {
+               _LOCK(lazy);
+               ret_val = kq_nq(&(lazy->readyq), task);
+               _UNLOCK(lazy);
+       }
+
+       return(ret_val);
+}
+
+static int _lazy_drop(struct sched_lazy *lazy, struct task *task)
+{
+       int ret_val;
+
+       ret_val = -EINVAL;
+
+       if(task) {
+               _LOCK(lazy);
+               ret_val = kq_rm(&(lazy->readyq), task);
+
+               if(ret_val < 0) {
+                       ret_val = kq_rm(&(lazy->waitq), task);
+               }
+               _UNLOCK(lazy);
+       }
+
+       return(ret_val);
+}
+
+static int _lazy_nice(struct sched_lazy *lazy, struct task *task, int nice)
+{
+       return(-ENOSYS);
+}
+
+int sched_lazy_init(struct sched **sch)
+{
+       struct sched_lazy *sched;
+       int ret_val;
+
+       ret_val = -ENOMEM;
+       sched = kmalloc(sizeof(*sched));
+
+       if(sched) {
+               sched->parent.tick = (void(*)(struct sched*))_lazy_tick;
+               sched->parent.sleep = (int(*)(struct sched*, struct task*))_lazy_sleep;
+               sched->parent.wakeup = (int(*)(struct sched*, struct task*, int))_lazy_wakeup;
+               sched->parent.schedule = (int(*)(struct sched*, struct task*))_lazy_schedule;
+               sched->parent.drop = (int(*)(struct sched*, struct task*))_lazy_drop;
+               sched->parent.nice = (int(*)(struct sched*, struct task*, int))_lazy_nice;
+
+               *sch = &(sched->parent);
+               ret_val = 0;
+       }
+
+       return(ret_val);
+}
+
+int sched_lazy_free(struct sched *sched, struct sched *other)
+{
+       return(-ENOSYS); /* FIXME: not implemented — must free queues and the sched_lazy allocation */
+}