From ff30091ef17737a40792e5f8c76a0bb40f09e463 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 17 Sep 2025 15:30:25 +0800 Subject: [PATCH 01/13] Add data structures for O(1) scheduler This commit extends the core scheduler data structures to support the new O(1) scheduler design. Added to tcb_t: - rq_node: embedded list node for ready-queue membership used during task state transitions. This avoids redundant malloc/free for per-enqueue/dequeue nodes by tying the node's lifetime to the task control block. Added to kcb_t: - ready_bitmap: 8-bit bitmap tracking which priority levels have runnable tasks. - ready_queues[]: per-priority ready queues for O(1) task selection. - rr_cursors[]: round-robin cursor per priority level to support fair selection within the same priority. These additions are structural only and prepare the scheduler for O(1) ready-queue operations; they do not change behavior yet. --- include/sys/task.h | 10 ++++++++++ kernel/task.c | 3 +++ 2 files changed, 13 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index 0d3aaa4d..15e85291 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -86,6 +86,10 @@ typedef struct tcb { /* Stack Protection */ uint32_t canary; /* Random stack canary for overflow detection */ + + /* State transition support */ + /* Ready queue membership node (only one per task) */ + list_node_t rq_node; } tcb_t; /* Kernel Control Block (KCB) @@ -108,6 +112,12 @@ typedef struct { /* Timer Management */ list_t *timer_list; /* List of active software timers */ volatile uint32_t ticks; /* Global system tick, incremented by timer */ + + /* Scheduling attributes */ + uint8_t ready_bitmap; /* 8-bit priority bitmap */ + list_t + *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */ + list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */ } kcb_t; /* Global pointer to the singleton Kernel Control Block */ diff --git a/kernel/task.c b/kernel/task.c index 1304a1ee..df1190b7 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -26,6 +26,9 @@ static kcb_t kernel_state = { .task_count = 0, .ticks = 0, .preemptive = true, /* Default to preemptive mode */ + .ready_bitmap = 0, + .ready_queues = {NULL}, + .rr_cursors = {NULL}, }; kcb_t *kcb = &kernel_state; From 826dc1aab8b660feba862982018381f2cb181a6c Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 19 Nov 2025 22:50:48 +0800 Subject: [PATCH 02/13] Add list helpers to support the new scheduler Previously, list_pushback() and list_remove() were the only list APIs for inserting data into and removing it from a list, and both allocate a new linkage node on insertion and free it on removal. Now that the new rq_node field serves as the linkage node for ready-queue operations, there is no need to malloc and free a node on every transition. This commit adds insertion and removal operations that reuse an existing linkage node instead of allocating or freeing one: - list_pushback_node(): append an existing node to the end of the list without allocating memory. - list_remove_node(): unlink a node from the list without freeing it. Both helpers run in O(n) because they locate the link point by linear search; they will be used by the upcoming enqueue/dequeue paths that move tasks into and out of the ready queues.
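To make the embedded-node idea concrete before the diff, here is a minimal, self-contained sketch using simplified stand-ins for list_node_t and tcb_t (the real definitions live in include/lib/list.h and include/sys/task.h). Because the linkage node lives inside the TCB, enqueuing and dequeuing only relink pointers; nothing is allocated or freed on these paths.

#include <stddef.h>
#include <stdint.h>

typedef struct list_node {
    struct list_node *next;
    void *data;
} node_t;

typedef struct tcb {
    uint16_t id;
    node_t rq_node; /* lifetime tied to the TCB: no malloc per enqueue */
} tcb_t;

/* Push the task's embedded node onto a singly linked ready list
 * (push-front for brevity; the kernel helper appends before a tail
 * sentinel instead). */
static void enqueue_sketch(node_t **head, tcb_t *task)
{
    task->rq_node.data = task; /* back-pointer used when selecting the task */
    task->rq_node.next = *head;
    *head = &task->rq_node;
}

/* Unlink the node without freeing it, so the same node can be reused
 * the next time the task becomes runnable. */
static void dequeue_sketch(node_t **head, tcb_t *task)
{
    for (node_t **pp = head; *pp; pp = &(*pp)->next) {
        if (*pp == &task->rq_node) {
            *pp = task->rq_node.next;
            task->rq_node.next = NULL;
            return;
        }
    }
}

The kernel helpers below follow the same pattern on the sentinel-based list type, appending before the tail sentinel rather than pushing at the front.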
--- include/lib/list.h | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/include/lib/list.h b/include/lib/list.h index 298e6c83..ce791a2b 100644 --- a/include/lib/list.h +++ b/include/lib/list.h @@ -100,6 +100,24 @@ static inline list_node_t *list_pushback(list_t *list, void *data) return node; } +/* Append an existing node to the end of the list without allocating */ +static inline void list_pushback_node(list_t *list, list_node_t *target) +{ + if (unlikely(!list || !target || target->next)) + return; + + target->next = list->tail; + + /* Insert before tail sentinel */ + list_node_t *prev = list->head; + while (prev->next != list->tail) + prev = prev->next; + + prev->next = target; + list->length++; + return; +} + static inline void *list_pop(list_t *list) { if (unlikely(list_is_empty(list))) @@ -134,6 +152,25 @@ static inline void *list_remove(list_t *list, list_node_t *target) return data; } +/* Remove a node from the list without freeing it */ +static inline void list_remove_node(list_t *list, list_node_t *target) +{ + if (unlikely(!list || !target || list_is_empty(list))) + return; + + list_node_t *prev = list->head; + while (prev->next != list->tail && prev->next != target) + prev = prev->next; + + if (unlikely(prev->next != target)) + return; /* node not found */ + + prev->next = target->next; + target->next = NULL; + list->length--; + return; +} + /* Iteration */ /* Callback should return non-NULL to stop early, NULL to continue */ From 26e68c8ff73fe62b0b91ef2b791e22e96ceb0e10 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 21 Nov 2025 12:16:41 +0800 Subject: [PATCH 03/13] Add enqueue path to sched_enqueue_task() Previously, `sched_enqueue_task()` only marked the task state as TASK_READY to indicate that the task had been enqueued, because the original scheduler selected the next task from the global list in which all tasks are kept. Now that the new ready_queue[] structure holds the runnable tasks, the enqueue API must push the embedded linkage node, rq_node, into the ready queue of the task's priority level. This commit uses the list_pushback_node() helper to enqueue the tcb's embedded list node into the ready queue, and sets up the cursor and bitmap of the corresponding priority level. --- kernel/task.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index df1190b7..491339ac 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -74,7 +74,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = { TASK_TIMESLICE_IDLE /* Priority 7: Idle */ }; -/* Mark task as ready (state-based) */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task); /* Utility and Validation Functions */ @@ -357,17 +357,36 @@ void _yield(void) __attribute__((weak, alias("yield"))); * practical performance with strong guarantees for fairness and reliability.
*/ -/* Add task to ready state - simple state-based approach */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task) { if (unlikely(!task)) return; + uint8_t prio_level = task->prio_level; + /* Ensure task has appropriate time slice for its priority */ - task->time_slice = get_priority_timeslice(task->prio_level); + task->time_slice = get_priority_timeslice(prio_level); task->state = TASK_READY; - /* Task selection is handled directly through the master task list */ + list_t **rq = &kcb->ready_queues[prio_level]; + list_node_t **cursor = &kcb->rr_cursors[prio_level]; + + if (!*rq) + *rq = list_create(); + + list_pushback_node(*rq, &task->rq_node); + + /* Set up the first rr_cursor */ + if (!*cursor) + *cursor = &task->rq_node; + + /* Advance cursor when it points at the running task */ + if (*cursor == kcb->task_current) + *cursor = &task->rq_node; + + kcb->ready_bitmap |= (1U << (task->prio_level)); + return; } /* Remove task from ready queues - state-based approach for compatibility */ From 8a98aa7d77a9996a395968408411da712e9b0421 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 21 Nov 2025 12:30:04 +0800 Subject: [PATCH 04/13] Add dequeue path in sched_dequeue_task() Previously, sched_dequeue_task() was a no-op stub, which was sufficient when the scheduler selected tasks from the global list. Since the new ready_queue structure now keeps all runnable tasks, a dequeue path is required to remove tasks that leave the runnable set, so that each queue only ever holds runnable tasks. This commit adds the dequeue path to sched_dequeue_task(), using the list_remove_node() helper to remove the existing linkage node from the corresponding ready queue and updating the RR cursor and priority bitmap accordingly. --- kernel/task.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 491339ac..463f01a4 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -389,16 +389,32 @@ static void sched_enqueue_task(tcb_t *task) return; } -/* Remove task from ready queues - state-based approach for compatibility */ +/* Remove task from ready queue */ void sched_dequeue_task(tcb_t *task) { if (unlikely(!task)) return; - /* For tasks that need to be removed from ready state (suspended/cancelled), - * we rely on the state change. The scheduler will skip non-ready tasks - * when it encounters them during the round-robin traversal. - */ + uint8_t prio_level = task->prio_level; + + /* A task leaving the ready/running state must be removed from its + * corresponding ready queue. */ + list_t *rq = kcb->ready_queues[prio_level]; + list_node_t **cursor = &kcb->rr_cursors[prio_level]; + + /* Safely move cursor to the next task node. */ + if (&task->rq_node == *cursor) + *cursor = list_cnext(rq, *cursor); + + /* Remove ready queue node */ + list_remove_node(rq, &task->rq_node); + + /* Clear cursor and bitmap when the ready queue becomes empty */ + if (rq->length == 0) { + *cursor = NULL; + kcb->ready_bitmap &= ~(1U << (task->prio_level)); + } + return; } /* Handle time slice expiration for current task */ From b608ac8ee00871f0da83c111ae763fb3b8dd5038 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 21 Nov 2025 13:30:24 +0800 Subject: [PATCH 05/13] Add enqueue and dequeue paths to task operation APIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, task operation APIs such as sched_wakeup_task() only updated the task state, which was sufficient when scheduling relied on the global task list.
With the scheduler now selecting runnable tasks from ready_queue[] per priority level, state changes alone are insufficient. To support the new scheduler and to prevent selection of tasks that have already left the runnable set, explicit enqueue and dequeue paths are required when task state transitions cross the runnable boundary: In ready-queue set: {TASK_RUNNING, TASK_READY} Not in ready-queue set: {all other states} This commit updates task operation APIs to include queue insertion and removal logic according to their semantics. In general, queue operations are performed by invoking existing helper functions mo_enqueue_task() and mo_dequeue_task(). The modified APIs include: - sched_wakeup_task(): avoid enqueueing a task that is already running by treating TASK_RUNNING as part of the runnable set complement. - mo_task_cancel(): dequeue TASK_READY tasks from ready_queue[] before cancelling, ensuring removed tasks are not scheduled again. - mo_task_delay(): runnable boundary transition only ("TASK_RUNNING → TASK_BLOCKED"), no queue insertion for non-runnable tasks. - mo_task_suspend(): supports both TASK_RUNNING and TASK_READY ("TASK_RUNNING/TASK_READY → TASK_SUSPENDED"), dequeue before suspend when necessary. - mo_task_resume(): only for suspended tasks ("TASK_SUSPENDED → TASK_READY"), enqueue into ready_queue[] on resume. - _sched_block(): runnable boundary transition only ("TASK_RUNNING → TASK_BLOCKED"), dequeue without memory free. --- kernel/task.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 463f01a4..f174c5fa 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -439,20 +439,15 @@ void sched_tick_current_task(void) } } -/* Task wakeup - simple state transition approach */ +/* Task wakeup and enqueue into ready queue */ void sched_wakeup_task(tcb_t *task) { if (unlikely(!task)) return; - /* Mark task as ready - scheduler will find it during round-robin traversal - */ - if (task->state != TASK_READY) { - task->state = TASK_READY; - /* Ensure task has time slice */ - if (task->time_slice == 0) - task->time_slice = get_priority_timeslice(task->prio_level); - } + /* Enqueue task into ready queue */ + if (task->state != TASK_READY && task->state != TASK_RUNNING) + sched_enqueue_task(task); } /* Efficient Round-Robin Task Selection with O(n) Complexity @@ -864,6 +859,10 @@ int32_t mo_task_cancel(uint16_t id) } } + /* Remove from ready queue */ + if (tcb->state == TASK_READY) + sched_dequeue_task(tcb); + CRITICAL_LEAVE(); /* Free memory outside critical section */ @@ -893,7 +892,9 @@ void mo_task_delay(uint16_t ticks) tcb_t *self = kcb->task_current->data; - /* Set delay and blocked state - scheduler will skip blocked tasks */ + /* Set delay and blocked state, dequeue from ready queue */ + sched_dequeue_task(self); + self->delay = ticks; self->state = TASK_BLOCKED; NOSCHED_LEAVE(); @@ -920,6 +921,11 @@ int32_t mo_task_suspend(uint16_t id) return ERR_TASK_CANT_SUSPEND; } + /* Remove task node from ready queue if task is in ready queue + * (TASK_RUNNING/TASK_READY).*/ + if (task->state == TASK_READY || task->state == TASK_RUNNING) + sched_dequeue_task(task); + task->state = TASK_SUSPENDED; bool is_current = (kcb->task_current->data == task); @@ -948,9 +954,8 @@ int32_t mo_task_resume(uint16_t id) CRITICAL_LEAVE(); return ERR_TASK_CANT_RESUME; } - - /* mark as ready - scheduler will find it */ - task->state = TASK_READY; + /* Enqueue resumed task into ready queue */ + sched_enqueue_task(task); 
CRITICAL_LEAVE(); return ERR_OK; @@ -1072,6 +1077,9 @@ void _sched_block(queue_t *wait_q) tcb_t *self = kcb->task_current->data; + /* Remove node from ready queue */ + sched_dequeue_task(self); + if (queue_enqueue(wait_q, self) != 0) panic(ERR_SEM_OPERATION); From b66b1541302724eeb0503ed88b29660e03e7e09a Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sat, 29 Nov 2025 15:42:15 +0800 Subject: [PATCH 06/13] Add dequeue helper for mutex lock Currently, mo_mutex_lock() calls mutex_block_atomic() to mark the running task as TASK_BLOCKED so that the old scheduler will not select it. To preserve the ready-queue invariant that only runnable tasks are kept in the queues, a dequeue step must also be taken when mutex_block_atomic() blocks a task. This commit adds the _sched_block_dequeue() helper, which will be applied in mutex_block_atomic() in the following commit. --- include/sys/task.h | 4 ++++ kernel/task.c | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index 15e85291..41606cee 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -298,6 +298,10 @@ uint64_t mo_uptime(void); */ void _sched_block(queue_t *wait_q); +/* Dequeue path for a task in the TASK_BLOCKED state. It must be called after + * the task state has been set to TASK_BLOCKED. Currently used by the mutex + * lock path. */ +void _sched_block_dequeue(tcb_t *blocked_task); + /* Application Entry Point */ /* The main entry point for the user application. diff --git a/kernel/task.c b/kernel/task.c index f174c5fa..0dfdb1a1 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -450,6 +450,15 @@ void sched_wakeup_task(tcb_t *task) sched_enqueue_task(task); } +/* Dequeue a task that has just been marked TASK_BLOCKED (mutex lock path) */ +void _sched_block_dequeue(tcb_t *blocked_task) +{ + if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED)) + return; + + sched_dequeue_task(blocked_task); +} + /* Efficient Round-Robin Task Selection with O(n) Complexity * * Selects the next ready task using circular traversal of the master task list. From c4d4de637abfa64dfa33ba12e628bcf189448bcc Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sat, 29 Nov 2025 15:54:36 +0800 Subject: [PATCH 07/13] Use _sched_block_dequeue() helper in mutex_block_atomic() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, mutex_block_atomic() only marked the running task as TASK_BLOCKED, which was sufficient when scheduling selected tasks by scanning the global task list. Since the new scheduler is designed to select only runnable tasks from ready_queue[], mutex blocking now also requires removing the task’s rq_node from the corresponding ready queue, preventing the scheduler from selecting a blocked (non-runnable/dequeued) task again.
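The invariant behind these two commits can be stated compactly: a task holds a node in ready_queues[] exactly when its state is TASK_READY or TASK_RUNNING, and every transition across that boundary must be paired with an enqueue or a dequeue. A small self-contained sketch of the predicate (the enum values here are illustrative stand-ins, not the kernel's definitions):

#include <stdbool.h>

/* Stand-in state enum; the kernel's real values live in include/sys/task.h. */
typedef enum {
    TASK_STOPPED,
    TASK_READY,
    TASK_RUNNING,
    TASK_BLOCKED,
    TASK_SUSPENDED,
} task_state_t;

/* True when a task in this state must hold a node in a ready queue. */
static bool holds_ready_queue_slot(task_state_t s)
{
    return s == TASK_READY || s == TASK_RUNNING;
}

Note the ordering that _sched_block_dequeue() enforces: the caller first sets the state to TASK_BLOCKED and only then dequeues, which is exactly the sequence mutex_block_atomic() follows in the diff below.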
--- kernel/mutex.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index 52a16a72..190654eb 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -76,9 +76,12 @@ static void mutex_block_atomic(list_t *waiters) /* Add to waiters list */ if (unlikely(!list_pushback(waiters, self))) panic(ERR_SEM_OPERATION); - + /* Block and yield atomically */ self->state = TASK_BLOCKED; + + /* Explicitly remove the task's list node from the ready queue */ + _sched_block_dequeue(self); _yield(); /* This releases NOSCHED when we context switch */ } From 737ef546aa8554bca7c64d2bd0e35550867e6dca Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sat, 29 Nov 2025 16:20:49 +0800 Subject: [PATCH 08/13] Add enqueue helper for mutex/semaphore operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, there is no enqueue API that can be invoked from other files, yet mutex and semaphore operations need one: they transition a task from TASK_BLOCKED to TASK_READY when a held resource is released. This change introduces the _sched_block_enqueue() helper, which will be used by the mutex/semaphore unblocking paths to insert the task’s existing linkage node into the corresponding per-priority ready queue, keeping scheduler visibility and ready-queue consistency. --- include/sys/task.h | 4 ++++ kernel/task.c | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index 41606cee..de9c0cbe 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -302,6 +302,10 @@ void _sched_block(queue_t *wait_q); * the task state has been set to TASK_BLOCKED. Currently used by the mutex * lock path. */ void _sched_block_dequeue(tcb_t *blocked_task); +/* Enqueue path for a task in the TASK_BLOCKED state. This is the main + * enqueue path for semaphore and mutex operations. */ +void _sched_block_enqueue(tcb_t *blocked_task); + /* Application Entry Point */ /* The main entry point for the user application. diff --git a/kernel/task.c b/kernel/task.c index 0dfdb1a1..4d321e79 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -459,6 +459,14 @@ void _sched_block_dequeue(tcb_t *blocked_task) sched_dequeue_task(blocked_task); } +void _sched_block_enqueue(tcb_t *blocked_task) +{ + if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED)) + return; + + sched_enqueue_task(blocked_task); +} + /* Efficient Round-Robin Task Selection with O(n) Complexity * * Selects the next ready task using circular traversal of the master task list. From e6f3bb615a0b2125557b470e3a45947c35a9a022 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sat, 29 Nov 2025 16:53:33 +0800 Subject: [PATCH 09/13] Use _sched_block_enqueue() helper in mutex/semaphore This commit replaces unblocking state transitions (TASK_BLOCKED->TASK_READY) in mutex and semaphore paths with the _sched_block_enqueue() helper to ensure scheduler visibility and preserve ready-queue invariants.
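Why a state-only wakeup is no longer sufficient can be seen in a tiny, self-contained toy model (not kernel code): a queue-based selector only sees tasks whose node is linked into a ready list, so flipping a state flag without enqueuing leaves the woken task invisible to the scheduler.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_task {
    const char *name;
    bool ready;            /* state flag alone */
    struct toy_task *next; /* ready-queue linkage */
};

static struct toy_task *ready_head;

static void toy_enqueue(struct toy_task *t)
{
    t->ready = true;
    t->next = ready_head;
    ready_head = t;
}

int main(void)
{
    struct toy_task a = {"woken-task", false, NULL};

    /* Old pattern: flip the flag only; a queue-based selector never sees it. */
    a.ready = true;
    printf("selector sees: %s\n", ready_head ? ready_head->name : "(nothing)");

    /* New pattern: link the node into the ready list as well. */
    toy_enqueue(&a);
    printf("selector sees: %s\n", ready_head ? ready_head->name : "(nothing)");
    return 0;
}

In the kernel, the counterpart of toy_enqueue() is _sched_block_enqueue(), which both sets TASK_READY and links rq_node into the per-priority ready queue, as the diff below shows.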
--- kernel/mutex.c | 14 +++++++------- kernel/semaphore.c | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index 190654eb..dd06727e 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -244,7 +244,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) if (self->state == TASK_BLOCKED) { /* We woke up due to timeout, not mutex unlock */ if (remove_self_from_waiters(m->waiters)) { - self->state = TASK_READY; + _sched_block_enqueue(self); result = ERR_TIMEOUT; } else { /* Race condition: we were both timed out and unlocked */ @@ -286,7 +286,7 @@ int32_t mo_mutex_unlock(mutex_t *m) /* Validate task state before waking */ if (likely(next_owner->state == TASK_BLOCKED)) { m->owner_tid = next_owner->id; - next_owner->state = TASK_READY; + _sched_block_enqueue(next_owner); /* Clear any pending timeout since we're granting ownership */ next_owner->delay = 0; } else { @@ -399,7 +399,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m) /* Failed to unlock - remove from wait list and restore state */ NOSCHED_ENTER(); remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + _sched_block_enqueue(self); NOSCHED_LEAVE(); return unlock_result; } @@ -442,7 +442,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) /* Failed to unlock - cleanup and restore */ NOSCHED_ENTER(); remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + _sched_block_enqueue(self); self->delay = 0; NOSCHED_LEAVE(); return unlock_result; @@ -458,7 +458,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) if (self->state == TASK_BLOCKED) { /* Timeout occurred - remove from wait list */ remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + _sched_block_enqueue(self); self->delay = 0; wait_status = ERR_TIMEOUT; } else { @@ -487,7 +487,7 @@ int32_t mo_cond_signal(cond_t *c) if (likely(waiter)) { /* Validate task state before waking */ if (likely(waiter->state == TASK_BLOCKED)) { - waiter->state = TASK_READY; + _sched_block_enqueue(waiter); /* Clear any pending timeout since we're signaling */ waiter->delay = 0; } else { @@ -514,7 +514,7 @@ int32_t mo_cond_broadcast(cond_t *c) if (likely(waiter)) { /* Validate task state before waking */ if (likely(waiter->state == TASK_BLOCKED)) { - waiter->state = TASK_READY; + _sched_block_enqueue(waiter); /* Clear any pending timeout since we're broadcasting */ waiter->delay = 0; } else { diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 640e372d..9fffa679 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s) if (likely(awakened_task)) { /* Validate awakened task state consistency */ if (likely(awakened_task->state == TASK_BLOCKED)) { - awakened_task->state = TASK_READY; + _sched_block_enqueue(awakened_task); should_yield = true; } else { /* Task state inconsistency - this should not happen */ From 6761f89fed36455e46428669600c5a4bbe42927f Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 26 Nov 2025 21:03:56 +0800 Subject: [PATCH 10/13] Refactor priority-change path in mo_task_priority() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, mo_task_priority() only updated the task’s time slice and priority level. With the new scheduler design, tasks are kept in per-priority ready queues, so mo_task_priority() must also handle migrating tasks between these queues. 
This commit adds dequeue/enqueue logic for tasks in TASK_RUNNING or TASK_READY state, as such tasks must reside in a ready queue and a priority change implies ready-queue migration. The priority fields are still updated as part of the migration path: sched_dequeue_task() relies on the current priority, while the enqueue operation needs the new priority. Therefore, the priority update is performed between the dequeue and enqueue steps. If the priority change happens while the task is running, it must yield immediately to preserve the scheduler’s strict task-ordering policy. --- kernel/task.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 4d321e79..eb272a7e 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -996,12 +996,30 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority) return ERR_TASK_NOT_FOUND; } + bool is_current = (kcb->task_current->data == task); + + /* Removed task from ready queue */ + if (task->state == TASK_RUNNING || task->state == TASK_READY) { + sched_dequeue_task(task); + + /* Update new properties */ + task->prio = priority; + task->prio_level = extract_priority_level(priority); + + /* Enqueue task node into new priority ready queue*/ + sched_enqueue_task(task); + } + /* Update priority and level */ task->prio = priority; task->prio_level = extract_priority_level(priority); task->time_slice = get_priority_timeslice(task->prio_level); CRITICAL_LEAVE(); + + if (is_current) + mo_task_yield(); + return ERR_OK; } From 5a77b207730465d96a20e15e4f21f5043741256c Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 26 Nov 2025 20:56:42 +0800 Subject: [PATCH 11/13] Refactor mo_task_spawn() for the new scheduler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit refactors mo_task_spawn() to align with the new O(1) scheduler design. The task control block (tcb_t) embeds its list node during task creation. The enqueue operation is moved inside a critical section to guarantee consistent enqueuing process during task creation. The “first task assignment” logic is removed because first task has been assigned to system idle task as previous commit mentioned. --- kernel/task.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index eb272a7e..a3c05966 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -820,8 +820,14 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) tcb->id = kcb->next_tid++; kcb->task_count++; /* Cached count of active tasks for quick access */ + /* Binding ready queue node */ + tcb->rq_node.data = tcb; + if (!kcb->task_current) - kcb->task_current = node; + kcb->task_current = &tcb->rq_node; + + /* Push node to ready queue */ + sched_enqueue_task(tcb); CRITICAL_LEAVE(); @@ -841,7 +847,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) /* Add to cache and mark ready */ cache_task(tcb->id, tcb); - sched_enqueue_task(tcb); return tcb->id; } From 0f92317cce9a6e5ae5719c6e07bae669b25e95f2 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 22:08:38 +0800 Subject: [PATCH 12/13] Refactor scheduler to RR cursor-based O(1) design Previously, the scheduler performed an O(N) scan of the global task list (kcb->tasks) to locate the next TASK_READY task. This resulted in non-deterministic selection latency and unstable round-robin rotation under heavy load or frequent task state transitions. 
This change introduces a strict O(1) scheduler based on per-priority ready queues and round-robin (RR) cursors. Each priority level maintains its own ready queue and cursor, enabling constant-time selection of the next runnable task while preserving fairness within the same priority. --- include/sys/task.h | 2 -- kernel/task.c | 81 ++++++++++++++++++---------------------------- 2 files changed, 31 insertions(+), 52 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index de9c0cbe..cc8d2570 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -124,8 +124,6 @@ typedef struct { extern kcb_t *kcb; /* System Configuration Constants */ -#define SCHED_IMAX \ - 500 /* Safety limit for scheduler iterations to prevent livelock */ #define MIN_TASK_STACK_SIZE \ 256 /* Minimum stack size to prevent stack overflow */ #define TASK_CACHE_SIZE \ diff --git a/kernel/task.c b/kernel/task.c index a3c05966..d51e9591 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -471,16 +471,15 @@ void _sched_block_enqueue(tcb_t *blocked_task) * * Selects the next ready task using circular traversal of the master task list. * - * Complexity: O(n) where n = number of tasks - * - Best case: O(1) when next task in sequence is ready - * - Worst case: O(n) when only one task is ready and it's the last checked - * - Typical case: O(k) where k << n (number of non-ready tasks to skip) + * Complexity: O(1) + * - Always constant-time selection, regardless of total task count. + * - No need to traverse the task list. * * Performance characteristics: - * - Excellent for small-to-medium task counts (< 50 tasks) - * - Simple and reliable implementation - * - Good cache locality due to sequential list traversal - * - Priority-aware time slice allocation + * - Ideal for systems with frequent context switches or many tasks. + * - Excellent cache locality: only touches nodes in the active ready queue. + * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup. + * - Each priority level maintains its own rr_cursor to ensure fair rotation. 
*/ uint16_t sched_select_next_task(void) { @@ -493,53 +492,35 @@ uint16_t sched_select_next_task(void) if (current_task->state == TASK_RUNNING) current_task->state = TASK_READY; - /* Round-robin search: find next ready task in the master task list */ - list_node_t *start_node = kcb->task_current; - list_node_t *node = start_node; - int iterations = 0; /* Safety counter to prevent infinite loops */ - - do { - /* Move to next task (circular) */ - node = list_cnext(kcb->tasks, node); - if (!node || !node->data) - continue; - - tcb_t *task = node->data; + /* Bitmap search, from bit0 (highest priority level) to bit7 (lowest + * priority level) */ + uint32_t bitmap = kcb->ready_bitmap; + uint8_t top_prio_level = 0; + while (top_prio_level < 8) { + if (bitmap & 1U) + break; + bitmap >>= 1; + top_prio_level++; + } - /* Skip non-ready tasks */ - if (task->state != TASK_READY) - continue; + list_node_t **cursor = &kcb->rr_cursors[top_prio_level]; + list_t *rq = kcb->ready_queues[top_prio_level]; + if (unlikely(!rq || !*cursor)) + panic(ERR_NO_TASKS); - /* Found a ready task */ - kcb->task_current = node; - task->state = TASK_RUNNING; - task->time_slice = get_priority_timeslice(task->prio_level); + /* Update next task with top priority cursor */ + kcb->task_current = *cursor; - return task->id; + /* Advance top priority cursor to next task node */ + *cursor = list_cnext(rq, *cursor); - } while (node != start_node && ++iterations < SCHED_IMAX); + /* Update new task properties */ + tcb_t *new_task = kcb->task_current->data; + new_task->time_slice = get_priority_timeslice(new_task->prio_level); + new_task->state = TASK_RUNNING; - /* No ready tasks found in preemptive mode - all tasks are blocked. - * This is normal for periodic RT tasks waiting for their next period. - * We CANNOT return a BLOCKED task as that would cause it to run. - * Instead, find ANY task (even blocked) as a placeholder, then wait for - * interrupt. - */ - if (kcb->preemptive) { - /* Select any task as placeholder (dispatcher won't actually switch to - * it if blocked) */ - list_node_t *any_node = list_next(kcb->tasks->head); - while (any_node && any_node != kcb->tasks->tail) { - if (any_node->data) { - kcb->task_current = any_node; - tcb_t *any_task = any_node->data; - return any_task->id; - } - any_node = list_next(any_node); - } - /* No tasks at all - this is a real error */ - panic(ERR_NO_TASKS); - } + if (kcb->task_current) + return new_task->id; /* In cooperative mode, having no ready tasks is an error */ panic(ERR_NO_TASKS); From 77bb7ae8972aa6e00f8b4e31e74d1a1880d3c716 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sat, 29 Nov 2025 17:19:31 +0800 Subject: [PATCH 13/13] Add branch ci --- .github/workflows/ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index df16d0c6..08d63d2f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,9 +4,10 @@ on: push: branches: - main + - o1-sched-lauch pull_request: branches: - - main + - main # Cancel in-progress runs for the same PR/branch concurrency:
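As a closing illustration of the selection path introduced above: the ready bitmap reduces "find the highest-priority non-empty ready queue" to a bounded scan of at most eight bits (bit 0 is the highest priority), after which the per-level rr_cursor is dereferenced and advanced. A self-contained sketch of the bitmap half, with illustrative names rather than the kernel's:

#include <stdint.h>

#define LEVELS 8U /* mirrors TASK_PRIORITY_LEVELS in this series */

/* Return the highest-priority non-empty level, or LEVELS when the bitmap is
 * empty (no runnable tasks anywhere). Bit 0 = highest priority. */
static uint8_t highest_ready_level(uint8_t ready_bitmap)
{
    uint8_t level = 0;
    while (level < LEVELS && !(ready_bitmap & (1U << level)))
        level++;
    return level;
}

Because the loop is bounded by the eight priority levels, selection cost no longer depends on the number of tasks; on targets with a find-first-set instruction the loop could be replaced by a single ctz-style builtin, though that is an optimization beyond what this series does.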