#define __CORE_SCHED_H
#include <config.h>
+#include <arch.h>
struct task;
} sched_task_state_t;
struct sched {
 	int (*tick)(struct sched*);
 	int (*sleep)(struct sched*, struct task*);
 	int (*wakeup)(struct sched*, struct task*, int);
+	int (*schedule)(struct sched*, struct task*);
+	int (*drop)(struct sched*, struct task*);
 	int (*nice)(struct sched*, struct task*, int);
};
-void sched_tick(void);
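+/*
+ * GLOBAL expands to nothing in the translation unit that defines
+ * __SCHED_COMMON (which provides the single definition of _sched) and to
+ * `extern` everywhere else.
+ */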
+#ifdef __SCHED_COMMON
+#define GLOBAL
+#else
+#define GLOBAL extern
+#endif
+
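+/* one scheduler instance per CPU, indexed by CPU_ID */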
+GLOBAL struct sched *_sched[CONFIG_SMP_CPUS];
+
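+/* dispatch helpers: forward each call to the current CPU's scheduler */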
+#define sched_tick() (_sched[CPU_ID])->tick(_sched[CPU_ID])
+#define sched_sleep(t) (_sched[CPU_ID])->sleep(_sched[CPU_ID], (t))
+#define sched_wakeup(t,f) (_sched[CPU_ID])->wakeup(_sched[CPU_ID], (t), (f))
+#define sched_schedule(t) (_sched[CPU_ID])->schedule(_sched[CPU_ID], (t))
+#define sched_drop(t) (_sched[CPU_ID])->drop(_sched[CPU_ID], (t))
+#define sched_nice(t,n) (_sched[CPU_ID])->nice(_sched[CPU_ID], (t), (n))
+
void sched_wait(pid_t);
int sched_raise(pid_t);
int sched_enqueue(task_t*);
--- /dev/null
+#include <config.h>
+#include <corax/errno.h>
+#include <arch.h>
+#include <sched.h>
+#include <kq.h>
+
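+/*
+ * The lazy scheduler: a simple round-robin policy built on two kernel
+ * queues (one for ready tasks, one for sleeping tasks) protected by a
+ * single spinlock.
+ */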
+struct sched_lazy {
+ struct sched parent;
+
+ spinlock_t lock;
+
+ /* structures for task accounting */
+ kq_t *readyq;
+ kq_t *waitq;
+};
+
+#define _LOCK(_l) spinlock_lock(&((_l)->lock))
+#define _UNLOCK(_l) spinlock_unlock(&((_l)->lock))
+
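+/*
+ * Account one timer tick to the current task. Once its time slice is used
+ * up, the task is put back on the ready queue and the next ready task is
+ * switched in. Returns -EINTR if the task was woken up by a signal.
+ */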
+static int _lazy_tick(struct sched_lazy *lazy)
+{
+ struct task *ctask;
+ int ret_val;
+
+ ctask = task_get_current();
+ ret_val = 0;
+
+ task_lock(ctask);
+
+ if(ctask->t_state != TASK_STATE_EXIT) {
+ if(ctask->t_rslice) {
+ ctask->t_rslice--;
+ } else {
+ /* task has exhausted its time slice - reschedule */
+ ctask->t_rslice = ctask->t_tslice;
+ ctask->t_state = TASK_STATE_READY;
+ ret_val = 1;
+ }
+ }
+
+ task_unlock(ctask);
+
+ if(ret_val) {
+ /* rescheduling is required */
+ _LOCK(lazy);
+
+ /* put current task back on the ready queue */
+ ret_val = kq_nq(&(lazy->readyq), ctask);
+
+ if(ret_val < 0) {
+ /*
+ * FIXME: could not put task back on ready queue - wat do?
+ *
+ * If we don't do anything here, the task would be silently
+ * dropped by the scheduler and never get scheduled again.
+ * Instead, it would be better if we could move it to a different
+ * processor, which (on NUMA systems) might have enough memory
+ * available to schedule the task.
+ */
+ }
+
+ /* get the first ready task */
+ ctask = (struct task*)kq_dq(&(lazy->readyq));
+
+ _UNLOCK(lazy);
+
+ if(ctask) {
+ task_switch(ctask);
+ }
+
+ /* ctask may not point to the correct task anymore */
+ ctask = task_get_current();
+
+ task_lock(ctask);
+
+ if(ctask->t_state == TASK_STATE_SIGNAL) {
+ ret_val = -EINTR;
+ } else {
+ ret_val = 0;
+ }
+
+ ctask->t_state = TASK_STATE_RUNNING;
+ task_unlock(ctask);
+ }
+
+ return(ret_val);
+}
+
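+/*
+ * Put `task` (or the calling task if `task` is NULL) to sleep by moving it
+ * from the ready queue to the wait queue. When the calling task itself is
+ * put to sleep, another task is scheduled right away.
+ */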
+static int _lazy_sleep(struct sched_lazy *lazy, struct task *task)
+{
+ struct task *ctask;
+ int ret_val;
+
+ ret_val = -EALREADY;
+ ctask = task_get_current();
+
+ if(!task) {
+ task = ctask;
+ }
+
+ task_lock(task);
+
+	if(task->t_state != TASK_STATE_WAITING) {
+		task->t_state = TASK_STATE_WAITING;
+		ret_val = 0;
+	}
+
+ task_unlock(task);
+
+ /* the task is already on the wait queue if it was waiting above */
+ if(!ret_val) {
+ _LOCK(lazy);
+
+ ret_val = kq_rm(&(lazy->readyq), task);
+
+ if(!ret_val) {
+ kq_nq(&(lazy->waitq), task);
+ }
+
+ _UNLOCK(lazy);
+ }
+
+	/* switch to another task if we're putting the current one to sleep */
+	if(task == ctask) {
+ ret_val = sched_tick();
+ }
+
+ return(ret_val);
+}
+
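+/*
+ * Wake up a sleeping task by moving it from the wait queue back to the
+ * ready queue. A non-zero `flags` marks the wakeup as signal-driven.
+ */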
+static int _lazy_wakeup(struct sched_lazy *lazy, struct task *task, int flags)
+{
+ int ret_val;
+
+ ret_val = -EALREADY;
+
+ task_lock(task);
+
+ /* don't resume tasks that aren't waiting to be resumed */
+	if(task->t_state == TASK_STATE_WAITING) {
+		task->t_state = flags ? TASK_STATE_SIGNAL : TASK_STATE_READY;
+ ret_val = 0;
+ } else {
+ ret_val = -EBUSY;
+ }
+
+ task_unlock(task);
+
+ /* if the task was waiting, move it to the ready queue */
+ if(!ret_val) {
+ _LOCK(lazy);
+
+		ret_val = kq_rm(&(lazy->waitq), task);
+
+ /* don't enqueue the task if it wasn't on the wait queue */
+ if(!ret_val) {
+ kq_nq(&(lazy->readyq), task);
+ }
+
+ _UNLOCK(lazy);
+ }
+
+ return(ret_val);
+}
+
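+/* admit a newly created or forked task to the ready queue */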
+static int _lazy_schedule(struct sched_lazy *lazy, struct task *task)
+{
+ int ret_val;
+
+ ret_val = -EINVAL;
+
+ task_lock(task);
+
+ if(task->t_state == TASK_STATE_NEW ||
+ task->t_state == TASK_STATE_FORKED) {
+ ret_val = 0;
+ }
+
+ task_unlock(task);
+
+ /* only allow TASK_STATE_NEW and TASK_STATE_FORKED tasks to be enqueued */
+ if(!ret_val) {
+ _LOCK(lazy);
+ ret_val = kq_nq(&(lazy->readyq), task);
+ _UNLOCK(lazy);
+ }
+
+ return(ret_val);
+}
+
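+/* remove a task from the scheduler, whichever queue it is currently on */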
+static int _lazy_drop(struct sched_lazy *lazy, struct task *task)
+{
+ int ret_val;
+
+ ret_val = -EINVAL;
+
+ if(task) {
+ _LOCK(lazy);
+ ret_val = kq_rm(&(lazy->readyq), task);
+
+ if(ret_val < 0) {
+ ret_val = kq_rm(&(lazy->waitq), task);
+ }
+ _UNLOCK(lazy);
+ }
+
+ return(ret_val);
+}
+
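+/* task priorities are not supported by the lazy scheduler */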
+static int _lazy_nice(struct sched_lazy *lazy, struct task *task, int nice)
+{
+ return(-ENOSYS);
+}
+
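+/*
+ * Allocate a lazy scheduler instance, wire up its operations and hand the
+ * embedded struct sched back to the caller through `*sch`.
+ */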
+int sched_lazy_init(struct sched **sch)
+{
+ struct sched_lazy *sched;
+ int ret_val;
+
+ ret_val = -ENOMEM;
+ sched = kmalloc(sizeof(*sched));
+
+	if(sched) {
+		/* FIXME: the spinlock is never initialised; an empty kq is assumed
+		 * to be represented by a NULL head */
+		sched->readyq = NULL;
+		sched->waitq = NULL;
+
+		sched->parent.tick = (int(*)(struct sched*))_lazy_tick;
+ sched->parent.sleep = (int(*)(struct sched*, struct task*))_lazy_sleep;
+ sched->parent.wakeup = (int(*)(struct sched*, struct task*, int))_lazy_wakeup;
+ sched->parent.schedule = (int(*)(struct sched*, struct task*))_lazy_schedule;
+ sched->parent.drop = (int(*)(struct sched*, struct task*))_lazy_drop;
+ sched->parent.nice = (int(*)(struct sched*, struct task*, int))_lazy_nice;
+
+ *sch = &(sched->parent);
+ ret_val = 0;
+ }
+
+ return(ret_val);
+}
+
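+/* tear down a lazy scheduler, handing any remaining tasks over to `other` */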
+int sched_lazy_free(struct sched *sched, struct sched *other)
+{
+	/* FIXME: migrate any remaining tasks to `other`, then release `sched` */
+	return(-ENOSYS);
+}