diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index df16d0c6..08d63d2f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,9 +4,10 @@ on: push: branches: - main + - o1-sched-lauch pull_request: branches: - - main + - main # Cancel in-progress runs for the same PR/branch concurrency: diff --git a/include/lib/list.h b/include/lib/list.h index 298e6c83..ce791a2b 100644 --- a/include/lib/list.h +++ b/include/lib/list.h @@ -100,6 +100,24 @@ static inline list_node_t *list_pushback(list_t *list, void *data) return node; } +/* Pushback list node into list */ +static inline void list_pushback_node(list_t *list, list_node_t *target) +{ + if (unlikely(!list || !target || target->next)) + return; + + target->next = list->tail; + + /* Insert before tail sentinel */ + list_node_t *prev = list->head; + while (prev->next != list->tail) + prev = prev->next; + + prev->next = target; + list->length++; + return; +} + static inline void *list_pop(list_t *list) { if (unlikely(list_is_empty(list))) @@ -134,6 +152,25 @@ static inline void *list_remove(list_t *list, list_node_t *target) return data; } +/* Remove a node from list without freeing */ +static inline void list_remove_node(list_t *list, list_node_t *target) +{ + if (unlikely(!list || !target || list_is_empty(list))) + return; + + list_node_t *prev = list->head; + while (prev->next != list->tail && prev->next != target) + prev = prev->next; + + if (unlikely(prev->next != target)) + return; /* node not found */ + + prev->next = target->next; + target->next = NULL; + list->length--; + return; +} + /* Iteration */ /* Callback should return non-NULL to stop early, NULL to continue */ diff --git a/include/sys/task.h b/include/sys/task.h index 0d3aaa4d..cc8d2570 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -86,6 +86,10 @@ typedef struct tcb { /* Stack Protection */ uint32_t canary; /* Random stack canary for overflow detection */ + + /* State transition support */ + /* Ready 
queue membership node (only one per task) */ + list_node_t rq_node; } tcb_t; /* Kernel Control Block (KCB) @@ -108,14 +112,18 @@ typedef struct { /* Timer Management */ list_t *timer_list; /* List of active software timers */ volatile uint32_t ticks; /* Global system tick, incremented by timer */ + + /* Scheduling attributes */ + uint8_t ready_bitmap; /* 8-bit priority bitmap */ + list_t + *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */ + list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */ } kcb_t; /* Global pointer to the singleton Kernel Control Block */ extern kcb_t *kcb; /* System Configuration Constants */ -#define SCHED_IMAX \ - 500 /* Safety limit for scheduler iterations to prevent livelock */ #define MIN_TASK_STACK_SIZE \ 256 /* Minimum stack size to prevent stack overflow */ #define TASK_CACHE_SIZE \ @@ -288,6 +296,14 @@ uint64_t mo_uptime(void); */ void _sched_block(queue_t *wait_q); +/* Dequeue path for a task with TASK_BLOCKED state. It must be called before + * the task state is set to TASK_BLOCKED. Currently used in the mutex lock case. */ +void _sched_block_dequeue(tcb_t *blocked_task); + +/* Enqueue path for a task with TASK_BLOCKED state. This API is the main + * enqueue path for semaphore and mutex operations. */ +void _sched_block_enqueue(tcb_t *blocked_task); + /* Application Entry Point */ /* The main entry point for the user application. 
diff --git a/kernel/mutex.c b/kernel/mutex.c index 52a16a72..dd06727e 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -76,9 +76,12 @@ static void mutex_block_atomic(list_t *waiters) /* Add to waiters list */ if (unlikely(!list_pushback(waiters, self))) panic(ERR_SEM_OPERATION); - + /* Block and yield atomically */ self->state = TASK_BLOCKED; + + /* Explicit remove list node from the ready queue */ + _sched_block_dequeue(self); _yield(); /* This releases NOSCHED when we context switch */ } @@ -241,7 +244,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) if (self->state == TASK_BLOCKED) { /* We woke up due to timeout, not mutex unlock */ if (remove_self_from_waiters(m->waiters)) { - self->state = TASK_READY; + _sched_block_enqueue(self); result = ERR_TIMEOUT; } else { /* Race condition: we were both timed out and unlocked */ @@ -283,7 +286,7 @@ int32_t mo_mutex_unlock(mutex_t *m) /* Validate task state before waking */ if (likely(next_owner->state == TASK_BLOCKED)) { m->owner_tid = next_owner->id; - next_owner->state = TASK_READY; + _sched_block_enqueue(next_owner); /* Clear any pending timeout since we're granting ownership */ next_owner->delay = 0; } else { @@ -396,7 +399,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m) /* Failed to unlock - remove from wait list and restore state */ NOSCHED_ENTER(); remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + _sched_block_enqueue(self); NOSCHED_LEAVE(); return unlock_result; } @@ -439,7 +442,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) /* Failed to unlock - cleanup and restore */ NOSCHED_ENTER(); remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + _sched_block_enqueue(self); self->delay = 0; NOSCHED_LEAVE(); return unlock_result; @@ -455,7 +458,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) if (self->state == TASK_BLOCKED) { /* Timeout occurred - remove from wait list */ remove_self_from_waiters(c->waiters); - self->state = 
TASK_READY; + _sched_block_enqueue(self); self->delay = 0; wait_status = ERR_TIMEOUT; } else { @@ -484,7 +487,7 @@ int32_t mo_cond_signal(cond_t *c) if (likely(waiter)) { /* Validate task state before waking */ if (likely(waiter->state == TASK_BLOCKED)) { - waiter->state = TASK_READY; + _sched_block_enqueue(waiter); /* Clear any pending timeout since we're signaling */ waiter->delay = 0; } else { @@ -511,7 +514,7 @@ int32_t mo_cond_broadcast(cond_t *c) if (likely(waiter)) { /* Validate task state before waking */ if (likely(waiter->state == TASK_BLOCKED)) { - waiter->state = TASK_READY; + _sched_block_enqueue(waiter); /* Clear any pending timeout since we're broadcasting */ waiter->delay = 0; } else { diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 640e372d..9fffa679 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s) if (likely(awakened_task)) { /* Validate awakened task state consistency */ if (likely(awakened_task->state == TASK_BLOCKED)) { - awakened_task->state = TASK_READY; + _sched_block_enqueue(awakened_task); should_yield = true; } else { /* Task state inconsistency - this should not happen */ diff --git a/kernel/task.c b/kernel/task.c index 1304a1ee..d51e9591 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -26,6 +26,9 @@ static kcb_t kernel_state = { .task_count = 0, .ticks = 0, .preemptive = true, /* Default to preemptive mode */ + .ready_bitmap = 0, + .ready_queues = {NULL}, + .rr_cursors = {NULL}, }; kcb_t *kcb = &kernel_state; @@ -71,7 +74,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = { TASK_TIMESLICE_IDLE /* Priority 7: Idle */ }; -/* Mark task as ready (state-based) */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task); /* Utility and Validation Functions */ @@ -354,29 +357,64 @@ void _yield(void) __attribute__((weak, alias("yield"))); * practical performance with strong guarantees for fairness and reliability. 
*/ -/* Add task to ready state - simple state-based approach */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task) { if (unlikely(!task)) return; + uint8_t prio_level = task->prio_level; + /* Ensure task has appropriate time slice for its priority */ - task->time_slice = get_priority_timeslice(task->prio_level); + task->time_slice = get_priority_timeslice(prio_level); task->state = TASK_READY; - /* Task selection is handled directly through the master task list */ + list_t **rq = &kcb->ready_queues[prio_level]; + list_node_t **cursor = &kcb->rr_cursors[prio_level]; + + if (!*rq) + *rq = list_create(); + + list_pushback_node(*rq, &task->rq_node); + + /* Set up the first rr_cursor */ + if (!*cursor) + *cursor = &task->rq_node; + + /* Advance cursor when it points at the currently running task */ + if (*cursor == kcb->task_current) + *cursor = &task->rq_node; + + kcb->ready_bitmap |= (1U << (task->prio_level)); + return; } -/* Remove task from ready queues - state-based approach for compatibility */ +/* Remove task from ready queue */ void sched_dequeue_task(tcb_t *task) { if (unlikely(!task)) return; - /* For tasks that need to be removed from ready state (suspended/cancelled), - * we rely on the state change. The scheduler will skip non-ready tasks - * when it encounters them during the round-robin traversal. - */ + uint8_t prio_level = task->prio_level; + + /* A task leaving the ready or running state must also be removed from + * its corresponding ready queue. */ + list_t *rq = kcb->ready_queues[prio_level]; + list_node_t **cursor = &kcb->rr_cursors[prio_level]; + + /* Safely move cursor to next task node. 
*/ + if (&task->rq_node == *cursor) + *cursor = list_cnext(rq, *cursor); + + /* Remove ready queue node */ + list_remove_node(rq, &task->rq_node); + + /* Update task count in ready queue */ + if (rq->length == 0) { + *cursor = NULL; + kcb->ready_bitmap &= ~(1U << (task->prio_level)); + } + return; } /* Handle time slice expiration for current task */ @@ -401,36 +439,47 @@ void sched_tick_current_task(void) } } -/* Task wakeup - simple state transition approach */ +/* Task wakeup and enqueue into ready queue */ void sched_wakeup_task(tcb_t *task) { if (unlikely(!task)) return; - /* Mark task as ready - scheduler will find it during round-robin traversal - */ - if (task->state != TASK_READY) { - task->state = TASK_READY; - /* Ensure task has time slice */ - if (task->time_slice == 0) - task->time_slice = get_priority_timeslice(task->prio_level); - } + /* Enqueue task into ready queue */ + if (task->state != TASK_READY && task->state != TASK_RUNNING) + sched_enqueue_task(task); +} + +/* Dequeue blocked task when mutex lock is triggered */ +void _sched_block_dequeue(tcb_t *blocked_task) +{ + if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED)) + return; + + sched_dequeue_task(blocked_task); +} + +void _sched_block_enqueue(tcb_t *blocked_task) +{ + if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED)) + return; + + sched_enqueue_task(blocked_task); } /* Efficient Round-Robin Task Selection with O(n) Complexity * * Selects the next ready task using circular traversal of the master task list. * - * Complexity: O(n) where n = number of tasks - * - Best case: O(1) when next task in sequence is ready - * - Worst case: O(n) when only one task is ready and it's the last checked - * - Typical case: O(k) where k << n (number of non-ready tasks to skip) + * Complexity: O(1) + * - Always constant-time selection, regardless of total task count. + * - No need to traverse the task list. 
* * Performance characteristics: - * - Excellent for small-to-medium task counts (< 50 tasks) - * - Simple and reliable implementation - * - Good cache locality due to sequential list traversal - * - Priority-aware time slice allocation + * - Ideal for systems with frequent context switches or many tasks. + * - Excellent cache locality: only touches nodes in the active ready queue. + * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup. + * - Each priority level maintains its own rr_cursor to ensure fair rotation. */ uint16_t sched_select_next_task(void) { @@ -443,53 +492,35 @@ uint16_t sched_select_next_task(void) if (current_task->state == TASK_RUNNING) current_task->state = TASK_READY; - /* Round-robin search: find next ready task in the master task list */ - list_node_t *start_node = kcb->task_current; - list_node_t *node = start_node; - int iterations = 0; /* Safety counter to prevent infinite loops */ - - do { - /* Move to next task (circular) */ - node = list_cnext(kcb->tasks, node); - if (!node || !node->data) - continue; - - tcb_t *task = node->data; + /* Bitmap search, from bit0 (highest priority level) to bit7 (lowest + * priority level) */ + uint32_t bitmap = kcb->ready_bitmap; + uint8_t top_prio_level = 0; + while (top_prio_level < 8) { + if (bitmap & 1U) + break; + bitmap >>= 1; + top_prio_level++; + } - /* Skip non-ready tasks */ - if (task->state != TASK_READY) - continue; + list_node_t **cursor = &kcb->rr_cursors[top_prio_level]; + list_t *rq = kcb->ready_queues[top_prio_level]; + if (unlikely(!rq || !*cursor)) + panic(ERR_NO_TASKS); - /* Found a ready task */ - kcb->task_current = node; - task->state = TASK_RUNNING; - task->time_slice = get_priority_timeslice(task->prio_level); + /* Update next task with top priority cursor */ + kcb->task_current = *cursor; - return task->id; + /* Advance top priority cursor to next task node */ + *cursor = list_cnext(rq, *cursor); - } while (node != start_node && ++iterations < 
SCHED_IMAX); + /* Update new task properties */ + tcb_t *new_task = kcb->task_current->data; + new_task->time_slice = get_priority_timeslice(new_task->prio_level); + new_task->state = TASK_RUNNING; - /* No ready tasks found in preemptive mode - all tasks are blocked. - * This is normal for periodic RT tasks waiting for their next period. - * We CANNOT return a BLOCKED task as that would cause it to run. - * Instead, find ANY task (even blocked) as a placeholder, then wait for - * interrupt. - */ - if (kcb->preemptive) { - /* Select any task as placeholder (dispatcher won't actually switch to - * it if blocked) */ - list_node_t *any_node = list_next(kcb->tasks->head); - while (any_node && any_node != kcb->tasks->tail) { - if (any_node->data) { - kcb->task_current = any_node; - tcb_t *any_task = any_node->data; - return any_task->id; - } - any_node = list_next(any_node); - } - /* No tasks at all - this is a real error */ - panic(ERR_NO_TASKS); - } + if (kcb->task_current) + return new_task->id; /* In cooperative mode, having no ready tasks is an error */ panic(ERR_NO_TASKS); @@ -770,8 +801,14 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) tcb->id = kcb->next_tid++; kcb->task_count++; /* Cached count of active tasks for quick access */ + /* Binding ready queue node */ + tcb->rq_node.data = tcb; + if (!kcb->task_current) - kcb->task_current = node; + kcb->task_current = &tcb->rq_node; + + /* Push node to ready queue */ + sched_enqueue_task(tcb); CRITICAL_LEAVE(); @@ -791,7 +828,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) /* Add to cache and mark ready */ cache_task(tcb->id, tcb); - sched_enqueue_task(tcb); return tcb->id; } @@ -826,6 +862,10 @@ int32_t mo_task_cancel(uint16_t id) } } + /* Remove from ready queue */ + if (tcb->state == TASK_READY) + sched_dequeue_task(tcb); + CRITICAL_LEAVE(); /* Free memory outside critical section */ @@ -855,7 +895,9 @@ void mo_task_delay(uint16_t ticks) tcb_t *self = 
kcb->task_current->data; - /* Set delay and blocked state - scheduler will skip blocked tasks */ + /* Set delay and blocked state, dequeue from ready queue */ + sched_dequeue_task(self); + self->delay = ticks; self->state = TASK_BLOCKED; NOSCHED_LEAVE(); @@ -882,6 +924,11 @@ int32_t mo_task_suspend(uint16_t id) return ERR_TASK_CANT_SUSPEND; } + /* Remove task node from ready queue if task is in ready queue + * (TASK_RUNNING/TASK_READY).*/ + if (task->state == TASK_READY || task->state == TASK_RUNNING) + sched_dequeue_task(task); + task->state = TASK_SUSPENDED; bool is_current = (kcb->task_current->data == task); @@ -910,9 +957,8 @@ int32_t mo_task_resume(uint16_t id) CRITICAL_LEAVE(); return ERR_TASK_CANT_RESUME; } - - /* mark as ready - scheduler will find it */ - task->state = TASK_READY; + /* Enqueue resumed task into ready queue */ + sched_enqueue_task(task); CRITICAL_LEAVE(); return ERR_OK; @@ -936,12 +982,30 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority) return ERR_TASK_NOT_FOUND; } + bool is_current = (kcb->task_current->data == task); + + /* Removed task from ready queue */ + if (task->state == TASK_RUNNING || task->state == TASK_READY) { + sched_dequeue_task(task); + + /* Update new properties */ + task->prio = priority; + task->prio_level = extract_priority_level(priority); + + /* Enqueue task node into new priority ready queue*/ + sched_enqueue_task(task); + } + /* Update priority and level */ task->prio = priority; task->prio_level = extract_priority_level(priority); task->time_slice = get_priority_timeslice(task->prio_level); CRITICAL_LEAVE(); + + if (is_current) + mo_task_yield(); + return ERR_OK; } @@ -1034,6 +1098,9 @@ void _sched_block(queue_t *wait_q) tcb_t *self = kcb->task_current->data; + /* Remove node from ready queue */ + sched_dequeue_task(self); + if (queue_enqueue(wait_q, self) != 0) panic(ERR_SEM_OPERATION);