From 53eb3093d0ccd0d391a5d9d9b872793b8c5c9743 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 17 Sep 2025 15:30:25 +0800 Subject: [PATCH 01/12] Add data structures for O(1) scheduler Extends the core scheduler data structures to support the new O(1) scheduler design. Adds in tcb_t: - rq_node: embedded list node for ready-queue membership used during task state transitions. This avoids redundant malloc/free for per-enqueue/dequeue nodes by tying the node's lifetime to the task control block. Adds in kcb_t: - ready_bitmap: 8-bit bitmap tracking which priority levels have runnable tasks. - ready_queues[]: per-priority ready queues for O(1) task selection. - rr_cursors[]: round-robin cursor per priority level to support fair selection within the same priority. These additions are structural only and prepare the scheduler for O(1) ready-queue operations. --- include/sys/task.h | 10 ++++++++++ kernel/task.c | 3 +++ 2 files changed, 13 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index 0d3aaa4d..15e85291 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -86,6 +86,10 @@ typedef struct tcb { /* Stack Protection */ uint32_t canary; /* Random stack canary for overflow detection */ + + /* State transition support */ + /* Ready queue membership node (only one per task) */ + list_node_t rq_node; } tcb_t; /* Kernel Control Block (KCB) @@ -108,6 +112,12 @@ typedef struct { /* Timer Management */ list_t *timer_list; /* List of active software timers */ volatile uint32_t ticks; /* Global system tick, incremented by timer */ + + /* Scheduling attributions */ + uint8_t ready_bitmap; /* 8-bit priority bitmap */ + list_t + *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */ + list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */ } kcb_t; /* Global pointer to the singleton Kernel Control Block */ diff --git a/kernel/task.c b/kernel/task.c index 1304a1ee..df1190b7 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -26,6 +26,9 @@ static kcb_t kernel_state = { .task_count = 0, .ticks = 0, .preemptive = true, /* Default to preemptive mode */ + .ready_bitmap = 0, + .ready_queues = {NULL}, + .rr_cursors = {NULL}, }; kcb_t *kcb = &kernel_state; From eedb4c41b927b627312000e202870865e90aef91 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 19 Nov 2025 22:50:48 +0800 Subject: [PATCH 02/12] Add list helpers to support task state transition Previously, list_pushback() and list_remove() were the only list APIs available for data insertion into and removal from the list by malloc a new and free target linkage node. After the new data structure, rq_node, is added as the linkage node for ready queue operation purpose, there is no need to malloc and free each time. Add the insertion and removal list helpers without malloc and free on the linkage node. Both APIs will be applied in the dequeue and enqueue paths. 
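As a minimal usage sketch (illustration only, not part of this patch; it assumes the header is reachable as <lib/list.h> and that list_create() sets up the head/tail sentinels these helpers rely on), the embedded-node pattern looks like:

    #include <lib/list.h>

    struct item {
        int value;
        list_node_t rq_node; /* embedded linkage node, same idea as tcb_t.rq_node */
    };

    static void demo(void)
    {
        list_t *q = list_create();
        struct item a = {.value = 1};

        a.rq_node.data = &a;   /* bind the node to its owner once */
        a.rq_node.next = NULL; /* required: list_pushback_node() rejects linked nodes */

        list_pushback_node(q, &a.rq_node); /* enqueue: no malloc */
        list_remove_node(q, &a.rq_node);   /* dequeue: no free   */
    }

Because the node's lifetime is tied to its owner, the enqueue/dequeue hot paths never touch the allocator.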
---
 include/lib/list.h | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/include/lib/list.h b/include/lib/list.h
index 298e6c83..ce791a2b 100644
--- a/include/lib/list.h
+++ b/include/lib/list.h
@@ -100,6 +100,24 @@ static inline list_node_t *list_pushback(list_t *list, void *data)
     return node;
 }
 
+/* Pushback list node into list */
+static inline void list_pushback_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || target->next))
+        return;
+
+    target->next = list->tail;
+
+    /* Insert before tail sentinel */
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail)
+        prev = prev->next;
+
+    prev->next = target;
+    list->length++;
+    return;
+}
+
 static inline void *list_pop(list_t *list)
 {
     if (unlikely(list_is_empty(list)))
@@ -134,6 +152,25 @@ static inline void *list_remove(list_t *list, list_node_t *target)
     return data;
 }
 
+/* Remove a node from list without freeing */
+static inline void list_remove_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || list_is_empty(list)))
+        return;
+
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail && prev->next != target)
+        prev = prev->next;
+
+    if (unlikely(prev->next != target))
+        return; /* node not found */
+
+    prev->next = target->next;
+    target->next = NULL;
+    list->length--;
+    return;
+}
+
 /* Iteration */
 
 /* Callback should return non-NULL to stop early, NULL to continue */

From 3ac26b9d1c3942f5d6d9244c45b10df0a7c0e71e Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 12:16:41 +0800
Subject: [PATCH 03/12] Add enqueue path to sched_enqueue_task()

Previously, `sched_enqueue_task()` only marked a task as TASK_READY to
indicate that it had been enqueued, because the original scheduler selected
the next task from the global list, which holds every task.

Now that ready_queues[] keeps the runnable tasks, the enqueue path must also
push the task's embedded linkage node, rq_node, onto the ready queue of its
priority level. This keeps enqueuing consistent with the new ready-queue
based task selection.
---
 kernel/task.c | 27 +++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index df1190b7..491339ac 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -74,7 +74,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = {
     TASK_TIMESLICE_IDLE /* Priority 7: Idle */
 };
 
-/* Mark task as ready (state-based) */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task);
 
 /* Utility and Validation Functions */
@@ -357,17 +357,36 @@ void _yield(void) __attribute__((weak, alias("yield")));
  * practical performance with strong guarantees for fairness and reliability.
  */
-/* Add task to ready state - simple state-based approach */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
+    uint8_t prio_level = task->prio_level;
+
     /* Ensure task has appropriate time slice for its priority */
-    task->time_slice = get_priority_timeslice(task->prio_level);
+    task->time_slice = get_priority_timeslice(prio_level);
     task->state = TASK_READY;
 
-    /* Task selection is handled directly through the master task list */
+    list_t **rq = &kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    if (!*rq)
+        *rq = list_create();
+
+    list_pushback_node(*rq, &task->rq_node);
+
+    /* Setup first rr_cursor */
+    if (!*cursor)
+        *cursor = &task->rq_node;
+
+    /* Advance cursor when cursor same as running task */
+    if (*cursor == kcb->task_current)
+        *cursor = &task->rq_node;
+
+    kcb->ready_bitmap |= (1U << (task->prio_level));
+    return;
 }
 
 /* Remove task from ready queues - state-based approach for compatibility */

From 95d9f81a314573be1a32770153e5e74fd954dc71 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 12:30:04 +0800
Subject: [PATCH 04/12] Add dequeue path in sched_dequeue_task()

Previously, sched_dequeue_task() was a no-op stub, which was sufficient when
the scheduler selected tasks from the global list. Now that ready_queues[]
keeps all runnable tasks, a dequeue path is required to remove a task from
its ready queue whenever it stops being runnable, so the queues only ever
contain runnable tasks. This enforces the invariant that a non-runnable task
is never present in a ready queue.
---
 kernel/task.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index 491339ac..463f01a4 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -389,16 +389,32 @@ static void sched_enqueue_task(tcb_t *task)
     return;
 }
 
-/* Remove task from ready queues - state-based approach for compatibility */
+/* Remove task from ready queue */
 void sched_dequeue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* For tasks that need to be removed from ready state (suspended/cancelled),
-     * we rely on the state change. The scheduler will skip non-ready tasks
-     * when it encounters them during the round-robin traversal.
-     */
+    uint8_t prio_level = task->prio_level;
+
+    /* For task that need to be removed from ready/running state, it need be
+     * removed from corresponding ready queue. */
+    list_t *rq = kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    /* Safely move cursor to next task node. */
+    if (&task->rq_node == *cursor)
+        *cursor = list_cnext(rq, *cursor);
+
+    /* Remove ready queue node */
+    list_remove_node(rq, &task->rq_node);
+
+    /* Update task count in ready queue */
+    if (rq->length == 0) {
+        *cursor = NULL;
+        kcb->ready_bitmap &= ~(1U << (task->prio_level));
+    }
+    return;
 }
 
 /* Handle time slice expiration for current task */

From 202b1155a7fb7519839896af38baef24bb7057e1 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 13:30:24 +0800
Subject: [PATCH 05/12] Add enqueue and dequeue paths to task operation APIs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously, task operation APIs such as sched_wakeup_task() only updated the
task state, which was sufficient when scheduling relied on the global task
list. With the scheduler now selecting runnable tasks from ready_queue[] per
priority level, state changes alone are insufficient.
To support the new scheduler and to prevent selection of tasks that have already left the runnable set, explicit enqueue and dequeue paths are required when task state transitions cross the runnable boundary: In ready-queue set: {TASK_RUNNING, TASK_READY} Not in ready-queue set: {all other states} This change updates task operation APIs to include queue insertion and removal logic according to their semantics. In general, queue operations are performed by invoking existing helper functions sched_enqueue_task() and sched_dequeue_task(). The modified APIs include: - sched_wakeup_task(): avoid enqueueing a task that is already running by treating TASK_RUNNING as part of the runnable set complement. - mo_task_cancel(): dequeue TASK_READY tasks from ready_queue[] before cancelling, ensuring removed tasks are not scheduled again. - mo_task_delay(): runnable boundary transition only ("TASK_RUNNING → TASK_BLOCKED"), no queue insertion for non-runnable tasks. - mo_task_suspend(): supports both TASK_RUNNING and TASK_READY ("TASK_RUNNING/TASK_READY → TASK_SUSPENDED"), dequeue before suspend when necessary. - mo_task_resume(): only for suspended tasks ("TASK_SUSPENDED → TASK_READY"), enqueue into ready_queue[] on resume. - _sched_block(): runnable boundary transition only ("TASK_RUNNING → TASK_BLOCKED"), dequeue without memory free. This change keeps task state transition consistent to the ready queue semantic. --- kernel/task.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 463f01a4..f174c5fa 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -439,20 +439,15 @@ void sched_tick_current_task(void) } } -/* Task wakeup - simple state transition approach */ +/* Task wakeup and enqueue into ready queue */ void sched_wakeup_task(tcb_t *task) { if (unlikely(!task)) return; - /* Mark task as ready - scheduler will find it during round-robin traversal - */ - if (task->state != TASK_READY) { - task->state = TASK_READY; - /* Ensure task has time slice */ - if (task->time_slice == 0) - task->time_slice = get_priority_timeslice(task->prio_level); - } + /* Enqueue task into ready queue */ + if (task->state != TASK_READY && task->state != TASK_RUNNING) + sched_enqueue_task(task); } /* Efficient Round-Robin Task Selection with O(n) Complexity @@ -864,6 +859,10 @@ int32_t mo_task_cancel(uint16_t id) } } + /* Remove from ready queue */ + if (tcb->state == TASK_READY) + sched_dequeue_task(tcb); + CRITICAL_LEAVE(); /* Free memory outside critical section */ @@ -893,7 +892,9 @@ void mo_task_delay(uint16_t ticks) tcb_t *self = kcb->task_current->data; - /* Set delay and blocked state - scheduler will skip blocked tasks */ + /* Set delay and blocked state, dequeue from ready queue */ + sched_dequeue_task(self); + self->delay = ticks; self->state = TASK_BLOCKED; NOSCHED_LEAVE(); @@ -920,6 +921,11 @@ int32_t mo_task_suspend(uint16_t id) return ERR_TASK_CANT_SUSPEND; } + /* Remove task node from ready queue if task is in ready queue + * (TASK_RUNNING/TASK_READY).*/ + if (task->state == TASK_READY || task->state == TASK_RUNNING) + sched_dequeue_task(task); + task->state = TASK_SUSPENDED; bool is_current = (kcb->task_current->data == task); @@ -948,9 +954,8 @@ int32_t mo_task_resume(uint16_t id) CRITICAL_LEAVE(); return ERR_TASK_CANT_RESUME; } - - /* mark as ready - scheduler will find it */ - task->state = TASK_READY; + /* Enqueue resumed task into ready queue */ + sched_enqueue_task(task); CRITICAL_LEAVE(); return ERR_OK; @@ -1072,6 
+1077,9 @@ void _sched_block(queue_t *wait_q) tcb_t *self = kcb->task_current->data; + /* Remove node from ready queue */ + sched_dequeue_task(self); + if (queue_enqueue(wait_q, self) != 0) panic(ERR_SEM_OPERATION); From f556a2c19eb8f096c968d30a1579773fb90aee1e Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sat, 29 Nov 2025 15:42:15 +0800 Subject: [PATCH 06/12] Add ready queue helpers for mutex/semaphore Previously, task operations lacked public enqueue/dequeue helpers usable from outside the task core, which made it impossible to keep ready queue synchronized when tasks crossed the runnable boundary. This change introduces two helpers to be used by mutex and semaphore resource-wait and wakeup paths, ensuring their state transitions update ready queue explicitly and remain aligned with ready-queue semantics. Both helpers are prefixed with _sched_block to emphasize their role in TASK_BLOCKED-related transitions. This keeps mutex and semaphore APIs consistent with ready-queue semantics. --- include/sys/task.h | 8 ++++++++ kernel/task.c | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index 15e85291..de9c0cbe 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -298,6 +298,14 @@ uint64_t mo_uptime(void); */ void _sched_block(queue_t *wait_q); +/* Dequeue path for task with TASK_BLOCKED state. It must be called before task + * state set as TASK_BLOCKED. Currently, this API is used in mutex lock case.*/ +void _sched_block_dequeue(tcb_t *blocked_task); + +/* Enqueue path for the task with TASK_BLOCKED state. This API is the mainly + * enqueuing path for semaphore and mutex operations. */ +void _sched_block_enqueue(tcb_t *blocked_task); + /* Application Entry Point */ /* The main entry point for the user application. diff --git a/kernel/task.c b/kernel/task.c index f174c5fa..4d321e79 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -450,6 +450,23 @@ void sched_wakeup_task(tcb_t *task) sched_enqueue_task(task); } +/* Dequeue blocked task when mutex lock is triggered */ +void _sched_block_dequeue(tcb_t *blocked_task) +{ + if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED)) + return; + + sched_dequeue_task(blocked_task); +} + +void _sched_block_enqueue(tcb_t *blocked_task) +{ + if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED)) + return; + + sched_enqueue_task(blocked_task); +} + /* Efficient Round-Robin Task Selection with O(n) Complexity * * Selects the next ready task using circular traversal of the master task list. From 9942fb47bfa8f493a52fcf9c3b5c291335fc0ba1 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 3 Dec 2025 17:26:52 +0800 Subject: [PATCH 07/12] Use ready queue helpers in mutex and semaphore path Invoke _sched_block_enqueue() and _sched_block_dequeue() helpers for all transitions into or out of TASK_BLOCKED state. This change keeps the scheduler ready queue and mutex/semaphore semantics aligned and consistent. 
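Condensed, the pattern applied across mutex.c and semaphore.c is sketched below (a simplification only; the real paths also manage the wait lists, timeouts, and the NOSCHED/critical sections shown in the diff):

    /* Blocking side: leave the runnable set, then leave the ready queue. */
    self->state = TASK_BLOCKED;
    _sched_block_dequeue(self); /* no-op unless the state is already TASK_BLOCKED */
    _yield();

    /* Waking side: put the waiter back; the helper sets TASK_READY itself. */
    if (likely(waiter->state == TASK_BLOCKED))
        _sched_block_enqueue(waiter);

The ordering matters: the state must be set to TASK_BLOCKED before either helper is called, since both reject tasks in any other state.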
--- kernel/mutex.c | 16 +++++++++++----- kernel/semaphore.c | 2 +- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index 5ff9c8aa..859b599c 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -78,6 +78,9 @@ static void mutex_block_atomic(list_t *waiters) /* Block and yield atomically */ self->state = TASK_BLOCKED; + + /* Explicit remove list node from the ready queue */ + _sched_block_dequeue(self); _yield(); /* This releases NOSCHED when we context switch */ } @@ -227,6 +230,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) /* Set up timeout using task delay mechanism */ self->delay = ticks; self->state = TASK_BLOCKED; + _sched_block_dequeue(self); NOSCHED_LEAVE(); @@ -282,7 +286,7 @@ int32_t mo_mutex_unlock(mutex_t *m) /* Validate task state before waking */ if (likely(next_owner->state == TASK_BLOCKED)) { m->owner_tid = next_owner->id; - next_owner->state = TASK_READY; + _sched_block_enqueue(next_owner); /* Clear any pending timeout since we're granting ownership */ next_owner->delay = 0; } else { @@ -387,6 +391,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m) panic(ERR_SEM_OPERATION); } self->state = TASK_BLOCKED; + _sched_block_dequeue(self); NOSCHED_LEAVE(); /* Release mutex */ @@ -395,7 +400,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m) /* Failed to unlock - remove from wait list and restore state */ NOSCHED_ENTER(); remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + _sched_block_enqueue(self); NOSCHED_LEAVE(); return unlock_result; } @@ -430,6 +435,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) } self->delay = ticks; self->state = TASK_BLOCKED; + _sched_block_dequeue(self); NOSCHED_LEAVE(); /* Release mutex */ @@ -438,7 +444,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) /* Failed to unlock - cleanup and restore */ NOSCHED_ENTER(); remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + _sched_block_enqueue(self); self->delay = 0; NOSCHED_LEAVE(); return unlock_result; @@ -483,7 +489,7 @@ int32_t mo_cond_signal(cond_t *c) if (likely(waiter)) { /* Validate task state before waking */ if (likely(waiter->state == TASK_BLOCKED)) { - waiter->state = TASK_READY; + _sched_block_enqueue(waiter); /* Clear any pending timeout since we're signaling */ waiter->delay = 0; } else { @@ -510,7 +516,7 @@ int32_t mo_cond_broadcast(cond_t *c) if (likely(waiter)) { /* Validate task state before waking */ if (likely(waiter->state == TASK_BLOCKED)) { - waiter->state = TASK_READY; + _sched_block_enqueue(waiter); /* Clear any pending timeout since we're broadcasting */ waiter->delay = 0; } else { diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 640e372d..9fffa679 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s) if (likely(awakened_task)) { /* Validate awakened task state consistency */ if (likely(awakened_task->state == TASK_BLOCKED)) { - awakened_task->state = TASK_READY; + _sched_block_enqueue(awakened_task); should_yield = true; } else { /* Task state inconsistency - this should not happen */ From e92dda482bf32d9ed7982cdcb6e514ffb1748935 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 3 Dec 2025 17:16:03 +0800 Subject: [PATCH 08/12] Refine mutex timed lock wakeup condition Once a task wakes up from a timed mutex lock, its state is already in TASK_RUNNING instead of TASK_BLOCKED. 
To determine whether the wakeup was triggered by the mutex or by a timeout, check whether the woken task is still present in the mutex waiter list. This change removes the incorrect TASK_BLOCKED-based condition check and replaces it with a waiter list check for timed mutex lock. --- kernel/mutex.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index 859b599c..e8747a27 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -237,20 +237,14 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks) /* Yield and let the scheduler handle timeout via delay mechanism */ mo_task_yield(); - /* Check result after waking up */ + /* Check result after waking up; the task is in TASK_RUNNING and in the + * ready queue */ int32_t result; NOSCHED_ENTER(); - if (self->state == TASK_BLOCKED) { - /* We woke up due to timeout, not mutex unlock */ - if (remove_self_from_waiters(m->waiters)) { - self->state = TASK_READY; - result = ERR_TIMEOUT; - } else { - /* Race condition: we were both timed out and unlocked */ - /* Check if we now own the mutex */ - result = (m->owner_tid == self_tid) ? ERR_OK : ERR_TIMEOUT; - } + /* If task still in the waiter list, it is woken up due to time out. */ + if (remove_self_from_waiters(m->waiters)) { + result = ERR_TIMEOUT; } else { /* We were woken by mutex unlock - check ownership */ result = (m->owner_tid == self_tid) ? ERR_OK : ERR_FAIL; @@ -453,14 +447,13 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks) /* Yield and wait for signal or timeout */ mo_task_yield(); - /* Determine why we woke up */ + /* Determine why we woke up; the task is in the TASK_RUNNING state and in + * the ready queue. */ int32_t wait_status; NOSCHED_ENTER(); - if (self->state == TASK_BLOCKED) { - /* Timeout occurred - remove from wait list */ - remove_self_from_waiters(c->waiters); - self->state = TASK_READY; + /* Timeout occurred - remove from wait list */ + if (remove_self_from_waiters(c->waiters)) { self->delay = 0; wait_status = ERR_TIMEOUT; } else { From 953e3c7f276dfd2d0342d68c6801183ee5524b72 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 26 Nov 2025 21:03:56 +0800 Subject: [PATCH 09/12] Refactor priority-change path in mo_task_priority() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, mo_task_priority() only updated the task’s time slice and priority level. With the new scheduler design, tasks are kept in per-priority ready queues, so mo_task_priority() must also handle migrating tasks between these queues. This change supports task migration from original ready queue to the new priority ready queue. 
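The ordering is the important part: a runnable task must be dequeued while task->prio_level still names its old queue, and only then may the priority fields be rewritten and the task re-enqueued, because both sched_dequeue_task() and sched_enqueue_task() index ready_queues[] through the task's current prio_level. As a condensed sketch of the sequence in the diff below:

    if (task->state == TASK_RUNNING || task->state == TASK_READY) {
        sched_dequeue_task(task);  /* uses the old prio_level */
        task->prio = priority;
        task->prio_level = extract_priority_level(priority);
        sched_enqueue_task(task);  /* uses the new prio_level */
    }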
--- kernel/task.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 4d321e79..eb272a7e 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -996,12 +996,30 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority) return ERR_TASK_NOT_FOUND; } + bool is_current = (kcb->task_current->data == task); + + /* Removed task from ready queue */ + if (task->state == TASK_RUNNING || task->state == TASK_READY) { + sched_dequeue_task(task); + + /* Update new properties */ + task->prio = priority; + task->prio_level = extract_priority_level(priority); + + /* Enqueue task node into new priority ready queue*/ + sched_enqueue_task(task); + } + /* Update priority and level */ task->prio = priority; task->prio_level = extract_priority_level(priority); task->time_slice = get_priority_timeslice(task->prio_level); CRITICAL_LEAVE(); + + if (is_current) + mo_task_yield(); + return ERR_OK; } From 1d91b3921592d0d8b15d35252c22f4fda50340f0 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 26 Nov 2025 20:56:42 +0800 Subject: [PATCH 10/12] Refactor mo_task_spawn() for the new scheduler Move sched_enqueue_task() into a critical section to protect ready_queue[] integrity, as the API modifies shared scheduler resources. Initialize the embedded rq_node when a task spawns and set its next pointer to NULL to ensure deterministic linkage for ready-queue insertion. Bind the initial task slot using rq_node instead of the global task list, matching the new ready-queue selection model. This aligns task-spawn behavior with rq_node-based scheduling semantics. --- kernel/task.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index eb272a7e..0ef858be 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -820,8 +820,15 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) tcb->id = kcb->next_tid++; kcb->task_count++; /* Cached count of active tasks for quick access */ + /* Binding ready queue node */ + tcb->rq_node.data = tcb; + tcb->rq_node.next = NULL; + if (!kcb->task_current) - kcb->task_current = node; + kcb->task_current = &tcb->rq_node; + + /* Push node to ready queue */ + sched_enqueue_task(tcb); CRITICAL_LEAVE(); @@ -841,7 +848,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) /* Add to cache and mark ready */ cache_task(tcb->id, tcb); - sched_enqueue_task(tcb); return tcb->id; } From 28955d5e3cfed000cd34a4ac1f9843772f66be43 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 22:08:38 +0800 Subject: [PATCH 11/12] Refactor scheduler to RR cursor-based O(1) design Previously, the scheduler performed an O(N) scan of the global task list (kcb->tasks) to locate the next TASK_READY task. This resulted in non-deterministic selection latency and unstable round-robin rotation under heavy load or frequent task state transitions. This change introduces a strict O(1) scheduler based on per-priority ready queues and round-robin (RR) cursors. Each priority level maintains its own ready queue and cursor, enabling constant-time selection of the next runnable task while preserving fairness within the same priority. 
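Selection remains constant-time because the bit scan is bounded by TASK_PRIORITY_LEVELS (eight levels), and it is only correct because the enqueue/dequeue paths from the earlier patches maintain the following per-level invariants (restated here as a sketch, not new code):

    /* For each priority level i:
     *   ready_bitmap bit i is set  <=>  ready_queues[i] is non-empty
     *   rr_cursors[i] != NULL      <=>  ready_queues[i] is non-empty
     *   rr_cursors[i] always points at a node currently linked in ready_queues[i]
     */
    kcb->ready_bitmap |= (1U << prio_level);  /* enqueue path */
    kcb->ready_bitmap &= ~(1U << prio_level); /* dequeue path, when the queue empties */

A stale bitmap would either send the selector into panic(ERR_NO_TASKS) on an empty queue or make it skip a level that still has runnable tasks.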
--- include/sys/task.h | 2 -- kernel/task.c | 81 +++++++++++++++++++--------------------------- 2 files changed, 33 insertions(+), 50 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index de9c0cbe..cc8d2570 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -124,8 +124,6 @@ typedef struct { extern kcb_t *kcb; /* System Configuration Constants */ -#define SCHED_IMAX \ - 500 /* Safety limit for scheduler iterations to prevent livelock */ #define MIN_TASK_STACK_SIZE \ 256 /* Minimum stack size to prevent stack overflow */ #define TASK_CACHE_SIZE \ diff --git a/kernel/task.c b/kernel/task.c index 0ef858be..2b4109fb 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -471,16 +471,15 @@ void _sched_block_enqueue(tcb_t *blocked_task) * * Selects the next ready task using circular traversal of the master task list. * - * Complexity: O(n) where n = number of tasks - * - Best case: O(1) when next task in sequence is ready - * - Worst case: O(n) when only one task is ready and it's the last checked - * - Typical case: O(k) where k << n (number of non-ready tasks to skip) + * Complexity: O(1) + * - Always constant-time selection, regardless of total task count. + * - No need to traverse the task list. * * Performance characteristics: - * - Excellent for small-to-medium task counts (< 50 tasks) - * - Simple and reliable implementation - * - Good cache locality due to sequential list traversal - * - Priority-aware time slice allocation + * - Ideal for systems with frequent context switches or many tasks. + * - Excellent cache locality: only touches nodes in the active ready queue. + * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup. + * - Each priority level maintains its own rr_cursor to ensure fair rotation. 
*/ uint16_t sched_select_next_task(void) { @@ -493,53 +492,39 @@ uint16_t sched_select_next_task(void) if (current_task->state == TASK_RUNNING) current_task->state = TASK_READY; - /* Round-robin search: find next ready task in the master task list */ - list_node_t *start_node = kcb->task_current; - list_node_t *node = start_node; - int iterations = 0; /* Safety counter to prevent infinite loops */ + /* Bitmap search, from bit0 (highest priority level) to bit7 (lowest + * priority level) */ + uint8_t bitmap = kcb->ready_bitmap; + if (unlikely(bitmap == 0)) + panic(ERR_NO_TASKS); - do { - /* Move to next task (circular) */ - node = list_cnext(kcb->tasks, node); - if (!node || !node->data) - continue; + uint8_t top_prio_level = 0; + while (top_prio_level < TASK_PRIORITY_LEVELS) { + if (bitmap & 1U) + break; - tcb_t *task = node->data; + bitmap >>= 1; + top_prio_level++; + } - /* Skip non-ready tasks */ - if (task->state != TASK_READY) - continue; + list_node_t **cursor = &kcb->rr_cursors[top_prio_level]; + list_t *rq = kcb->ready_queues[top_prio_level]; + if (unlikely(!rq || !*cursor)) + panic(ERR_NO_TASKS); - /* Found a ready task */ - kcb->task_current = node; - task->state = TASK_RUNNING; - task->time_slice = get_priority_timeslice(task->prio_level); + /* Update next task with top priority cursor */ + kcb->task_current = *cursor; - return task->id; + /* Advance top priority cursor to next task node */ + *cursor = list_cnext(rq, *cursor); - } while (node != start_node && ++iterations < SCHED_IMAX); + /* Update new task properties */ + tcb_t *new_task = kcb->task_current->data; + new_task->time_slice = get_priority_timeslice(new_task->prio_level); + new_task->state = TASK_RUNNING; - /* No ready tasks found in preemptive mode - all tasks are blocked. - * This is normal for periodic RT tasks waiting for their next period. - * We CANNOT return a BLOCKED task as that would cause it to run. - * Instead, find ANY task (even blocked) as a placeholder, then wait for - * interrupt. - */ - if (kcb->preemptive) { - /* Select any task as placeholder (dispatcher won't actually switch to - * it if blocked) */ - list_node_t *any_node = list_next(kcb->tasks->head); - while (any_node && any_node != kcb->tasks->tail) { - if (any_node->data) { - kcb->task_current = any_node; - tcb_t *any_task = any_node->data; - return any_task->id; - } - any_node = list_next(any_node); - } - /* No tasks at all - this is a real error */ - panic(ERR_NO_TASKS); - } + if (kcb->task_current) + return new_task->id; /* In cooperative mode, having no ready tasks is an error */ panic(ERR_NO_TASKS); From 99e33afad1573dd1b0d4bf28b5036e0993981151 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 30 Nov 2025 09:39:09 +0800 Subject: [PATCH 12/12] Add unit test for the new scheduler This unit test cover task.c, mutex.c, and semaphore.c APIs that related to task state transition. 
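The suite drives the scheduler with a simple pattern (names below are taken from the test source that follows): the controller task runs at TASK_PRIO_CRIT and spawns worker tasks at the same or lower priority, so a single mo_task_yield() hands the CPU to one worker, which blocks, suspends, or cancels itself and thereby returns control to the controller, which can then assert directly on kcb->ready_queues[], kcb->ready_bitmap, and kcb->rr_cursors[]. Roughly:

    int worker = mo_task_spawn(task_delay, DEFAULT_STACK_SIZE);
    mo_task_priority(worker, TASK_PRIO_CRIT); /* same level as the controller  */
    mo_task_yield();                          /* worker runs and blocks itself */
    /* back in the controller: inspect ready queue length, bitmap, cursor */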
--- Makefile | 2 +- app/test_sched.c | 517 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 518 insertions(+), 1 deletion(-) create mode 100644 app/test_sched.c diff --git a/Makefile b/Makefile index 23ef4d06..683e76d3 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ deps += $(LIB_OBJS:%.o=%.o.d) APPS := coop echo hello mqueues semaphore mutex cond \ pipes pipes_small pipes_struct prodcons progress \ rtsched suspend test64 timer timer_kill \ - cpubench test_libc + cpubench test_libc test_sched # Output files for __link target IMAGE_BASE := $(BUILD_DIR)/image diff --git a/app/test_sched.c b/app/test_sched.c new file mode 100644 index 00000000..7903738e --- /dev/null +++ b/app/test_sched.c @@ -0,0 +1,517 @@ +/* + * Test suite for the RR-cursor based scheduler implementation. + * + * This suite verifies the consistency of the O(1) RR-cursor based scheduler + * data structures, including bitmap bit positions, ready queues, and RR-cursor + * It exercises all task states (TASK_READY, TASK_BLOCKED, + * TASK_SUSPENDED, and TASK_RUNNING) and also verifies correct handling + * of task cancellation. + * + * The semaphore and mutex operations are also included in this unit tests file, + * verifying the alignment of the new scheduler design. + */ + +#include + +#include "private/error.h" + +#define TEST_ASSERT(condition, description) \ + do { \ + if (condition) { \ + printf("PASS: %s\n", description); \ + tests_passed++; \ + } else { \ + printf("FAIL: %s\n", description); \ + tests_failed++; \ + } \ + } while (0) + +/* Test results tracking */ +static int tests_passed = 0; +static int tests_failed = 0; + +/* Tasks and resources */ +int test_controller; +sem_t *sem; +mutex_t mutex; +cond_t cond; + +void task_normal(void) +{ + for (;;) { + mo_task_wfi(); + } +} + +/* Suspend self task */ +void task_suspend(void) +{ + mo_task_suspend(mo_task_id()); + while (1) + mo_task_wfi(); +} + +/* Delay and block self task */ +void task_delay(void) +{ + mo_task_delay(1); + mo_task_resume(test_controller); + mo_task_yield(); + while (1) + mo_task_wfi(); +} + +/* Mutex lock task; try to obtain lock */ +void task_mutex(void) +{ + mo_mutex_lock(&mutex); + mo_mutex_unlock(&mutex); + while (1) { + mo_task_wfi(); + } +} + +/* Mutex timed lock task; try to obtain lock before timeout */ +void task_mutex_timedlock(void) +{ + uint32_t ticks = 10; + TEST_ASSERT(mo_mutex_timedlock(&mutex, ticks) == ERR_TIMEOUT, + " Mutex timeout unlock successful "); + mo_task_resume(test_controller); + while (1) { + mo_task_wfi(); + } +} + +/* Mutex condition task; try to obtain lock before timeout */ +void task_mutex_cond(void) +{ + /* Acquire mutex */ + mo_mutex_lock(&mutex); + + /* Condition wait; enter condition waiter list and release mutex */ + mo_cond_wait(&cond, &mutex); + + /* Condition release, obtain lock again */ + mo_mutex_unlock(&mutex); + + while (1) { + mo_task_wfi(); + } +} + +/* Mutex condition timed lock task; try to obtain lock before timeout */ +void task_mutex_cond_timewait(void) +{ + /* Acquire mutex */ + mo_mutex_lock(&mutex); + + /* Condition wait; enter condition waiter list and release mutex */ + uint32_t ticks = 10; + TEST_ASSERT(mo_cond_timedwait(&cond, &mutex, ticks) == ERR_TIMEOUT, + " Mutex condition timeout unlock successful "); + + /* Condition release, obtain lock again */ + mo_mutex_unlock(&mutex); + mo_task_resume(test_controller); + + while (1) { + mo_task_wfi(); + } +} + +/* Semaphore task; try to obtain lock */ +void task_sem(void) +{ + mo_sem_wait(sem); + while (1) + mo_task_wfi(); 
+} + +/* Idle taskk */ +void task_idle(void) +{ + while (1) { + mo_task_wfi(); + } +} + +/* Helpers for verification */ + +/* Bitmap check */ +static bool bit_in_bitmap(int prio) +{ + return ((kcb->ready_bitmap & (1U << prio))) ? true : false; +} + +/* Task count check, list length approach */ +static int32_t task_cnt_in_sched(int prio) +{ + return kcb->ready_queues[prio]->length; +} + +/* Compare all list node id in the ready queue */ +static bool task_in_rq(int task_id, int prio) +{ + list_node_t *node = kcb->ready_queues[prio]->head->next; + while (node != kcb->ready_queues[prio]->tail) { + if (((tcb_t *) (node->data))->id == task_id) + return true; + node = node->next; + } + return false; +} + +/* Test priority bitmap consistency across task lifecycle transitions: + * basic task creation, priority migration, and cancellation. + */ +void test_bitmap(void) +{ + printf("\n=== Testing Priority Bitmap Consistency ===\n"); + + /* task count = 1 after spawn → bitmap bit should be set */ + int task_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE); + TEST_ASSERT(bit_in_bitmap(4) == true && task_cnt_in_sched(4) == 1, + "Bitmap sets bit when a same-priority task is spawned"); + + /* migrate task to a different priority queue → bitmap updates bits */ + mo_task_priority(task_id, TASK_PRIO_HIGH); + TEST_ASSERT(bit_in_bitmap(2) == true && bit_in_bitmap(4) == false && + task_cnt_in_sched(2) == 1 && task_cnt_in_sched(4) == 0, + "Bitmap updates bits correctly after priority migration"); + + /* cancel task → ready queue becomes empty, bitmap bit should clear */ + mo_task_cancel(task_id); + TEST_ASSERT(bit_in_bitmap(2) == false && task_cnt_in_sched(2) == 0, + "Bitmap clears bit when the migrated task is cancelled"); +} + +/* Test RR cursor consistency across task lifecycle transitions and + * task-count changes within a single priority queue. 
+ * + * Cursor invariants for a ready queue: + * - task count == 0: cursor points to NULL + * - task count == 1: cursor points to the only task node + * - task count > 1: cursor points to a task node that differs from + * the running task + * + * Scenarios: + * - Running task creates and cancels a same-priority task + * - Running task creates and cancels tasks in a different priority + * queue + */ +void test_cursor(void) +{ + printf("\n=== Testing Cursor Consistency ===\n"); + + /* --- Test1: Running task creates a same-priority task and cancels it --- + */ + + /* task count = 1, cursor should point to the only task node + * (controller, TASK_RUNNING) + */ + TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[0]->data))->id == test_controller && + task_cnt_in_sched(0) == 1, + " Task count 1: Cursor points to the only task node"); + + /* task count from 1 -> 2, cursor points to a task node different + * from the running task + */ + int task_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE); + mo_task_priority(task_id, TASK_PRIO_CRIT); + TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[0]->data))->id == task_id && + task_cnt_in_sched(0) == 2, + " Task count 1->2: Cursor points to the new task node which " + "originally points to the running task "); + + /* cancel the cursor(new) task, task count from 2 -> 1; cursor should move + * back to the next task(controller) + */ + mo_task_cancel(task_id); + TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[0]->data))->id == test_controller && + task_cnt_in_sched(0) == 1, + " Task count 2->1: Cursor points to next task (controller) " + "which points to the removed node "); + + + /* --- Test2: Running task creates different-priority task and cancels it + * --- */ + + /* task count = 0 */ + TEST_ASSERT(kcb->rr_cursors[4] == NULL && task_cnt_in_sched(4) == 0, + "Task count 0: Cursor is NULL when the ready queue is empty"); + + /* task count from 0 -> 1 */ + int task1_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE); + TEST_ASSERT( + ((tcb_t *) (kcb->rr_cursors[4]->data))->id == task1_id && + task_cnt_in_sched(4) == 1, + "Task count 0->1: Cursor initialized and points to the new task"); + + /* task count from 1 -> 2, cursor does not need to advance */ + int task2_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE); + TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[4]->data))->id == task1_id && + task_cnt_in_sched(4) == 2, + "Task count 1->2: Cursor is maintained when cursor not same as " + "the running task "); + + /* task count from 2 -> 1, cancel the cursor task */ + mo_task_cancel(task1_id); + TEST_ASSERT( + ((tcb_t *) (kcb->rr_cursors[4]->data))->id == task2_id && + task_cnt_in_sched(4) == 1, + "Task count 2->1: Cursor is advanced when cancelled cursor task "); + + /* task count from 1 -> 0 */ + mo_task_cancel(task2_id); + TEST_ASSERT(kcb->rr_cursors[4] == NULL && task_cnt_in_sched(4) == 0, + "Task count 1->0: Cursor is NULL when the ready queue becomes " + "empty again"); +} + +/* Task state transition APIs test, including APIs in task.c, semaphore.c and + * mutex.c */ + +/* Test ready queue consistency with state transition APIs - normal case */ +void test_normal_state_transition(void) +{ + printf("\n=== Testing APIs normal task state transition ===\n"); + + /* --- Test1: State transition from TASK_READY ---*/ + + /* TASK_STOPPED to TASK_READY */ + int suspend_task = mo_task_spawn(task_suspend, DEFAULT_STACK_SIZE); + TEST_ASSERT(task_in_rq(suspend_task, 4) && task_cnt_in_sched(4) == 1, + " Enqueue successfully: TASK_STOPPED -> TASK_READY"); + + /* TASK_READY to TASK_SUSPEND */ + 
mo_task_suspend(suspend_task); + TEST_ASSERT(!task_in_rq(suspend_task, 4) && task_cnt_in_sched(4) == 0, + " Dequeue successfully: TASK_READY -> TASK_SUSPEND"); + + /* TASK_SUSPEND to TASK_READY */ + mo_task_resume(suspend_task); + TEST_ASSERT(task_in_rq(suspend_task, 4) && task_cnt_in_sched(4) == 1, + " Enqueue successfully: TASK_SUSPEND -> TASK_READY"); + + /* --- Test2: State transition from TASK_RUNNING ---*/ + + /* When suspend task is executing (TASK_RUNNING), it will suspend itself + * (TASK_SUSPENDED) */ + mo_task_priority(suspend_task, TASK_PRIO_CRIT); + mo_task_yield(); + + /* Suspended task should not in the ready queue */ + TEST_ASSERT(!task_in_rq(suspend_task, 0) && task_cnt_in_sched(0) == 1, + " Dequeue successfully: TASK_RUNNING -> TASK_SUSPEND"); + + /* Resume suspended task, it will be enqueued again */ + mo_task_resume(suspend_task); + TEST_ASSERT(task_in_rq(suspend_task, 0) && task_cnt_in_sched(0) == 2, + " Enqueue successfully: TASK_SUSPEND -> TASK_READY"); + + mo_task_cancel(suspend_task); + + /* --- Test3: Delay task test (TASK_RUNNING) -> (TASK_BLOCKED) ---*/ + + int delay_id = mo_task_spawn(task_delay, DEFAULT_STACK_SIZE); + mo_task_priority(delay_id, TASK_PRIO_CRIT); + + /* Yield to block task, block itself and return to the controller */ + mo_task_yield(); + TEST_ASSERT(!task_in_rq(delay_id, 0) && task_cnt_in_sched(0) == 1, + " Dequeue successfully: TASK_RUNNING -> TASK_BLOCKED (delay)"); + + /* Suspend controller, the delay task will resume controller after delay + * task wakeup from TASK_BLOCK */ + mo_task_suspend(test_controller); + TEST_ASSERT(task_cnt_in_sched(0) == 2, + " Enqueue successfully: TASK_BLOCKED (delay) -> TASK_READY"); + + mo_task_cancel(delay_id); +} + +/* Test ready queue consistency with state transition APIs - semaphore case */ +void test_sem_block_state_transition(void) +{ + printf("\n=== Testing Semaphore ===\n"); + + sem = mo_sem_create(1, 1); + mo_sem_wait(sem); + + /* Create semaphore task and yield controller to it; semaphore task state + * from TASK_RUNNING to TASK_BLOCKED (wait resource) */ + int sem_id = mo_task_spawn(task_sem, DEFAULT_STACK_SIZE); + mo_task_priority(sem_id, TASK_PRIO_CRIT); + mo_task_yield(); + TEST_ASSERT( + task_cnt_in_sched(0) == 1 && mo_sem_waiting_count(sem) == 1, + " Semaphore task dequeue successfully when no semaphore resource "); + + /* Controller release a resource, the semaphore task state from TASK_BLOCKED + * to TASK_READY */ + mo_sem_signal(sem); + mo_task_yield(); + TEST_ASSERT( + task_cnt_in_sched(0) == 2 && mo_sem_waiting_count(sem) == 0, + " Semaphore task enqueue successfully when resource available "); + mo_sem_destroy(sem); + mo_task_cancel(sem_id); +} + +void test_mutex(void) +{ + printf("\n=== Testing Mutex ===\n"); + + /* --- Test1: Mutex lock and unlock --- */ + + /* Create a mutex lock task */ + mo_mutex_init(&mutex); + int mutex_id = mo_task_spawn(task_mutex, DEFAULT_STACK_SIZE); + mo_task_priority(mutex_id, TASK_PRIO_CRIT); + + /* Controller acquire mutex lock, yield to mutex task that block itself. 
The + * mutex task will try to acquire the mutex lock which has been acquired by + * the controller; block itself */ + mo_mutex_lock(&mutex); + mo_task_yield(); + + /* Mutex task block itself, return to the controller task */ + TEST_ASSERT( + task_cnt_in_sched(0) == 1 && mo_mutex_waiting_count(&mutex) == 1, + " Mutex task dequeue successfully when mutex lock is not available "); + + /* Controller release the mutex lock, mutex task will re-enqueue */ + mo_mutex_unlock(&mutex); + mo_task_yield(); + TEST_ASSERT( + task_cnt_in_sched(0) == 2 && mo_mutex_waiting_count(&mutex) == 0, + " Mutex task enqueue successfully when mutex released by the " + "controller"); + mo_task_cancel(mutex_id); + + /* --- Test2: Mutex lock timeout --- */ + + mo_mutex_lock(&mutex); + mutex_id = mo_task_spawn(task_mutex_timedlock, DEFAULT_STACK_SIZE); + mo_task_priority(mutex_id, TASK_PRIO_CRIT); + + mo_task_yield(); + /* Mutex task block itself, the ready queue task count reduced, mutex lock + * waiting count added */ + TEST_ASSERT( + task_cnt_in_sched(0) == 1 && mo_mutex_waiting_count(&mutex) == 1, + " Timed mutex task dequeue successfully when mutex lock is not " + "available "); + /* Controller suspend, timed mutex lock will wakeup controller when timeout + * happen */ + mo_task_suspend(mo_task_id()); + mo_mutex_unlock(&mutex); + + TEST_ASSERT( + task_cnt_in_sched(0) == 2 && mo_mutex_waiting_count(&mutex) == 0, + " Timed mutex task enqueue successfully when timeout "); + mo_task_cancel(mutex_id); +} + +void test_mutex_cond(void) +{ + printf("\n=== Testing Mutex Condition ===\n"); + + /*--- Test 1: Mutex condition wait ---*/ + mo_cond_init(&cond); + /* Spawn condition wait task */ + int c_wait1 = mo_task_spawn(task_mutex_cond, DEFAULT_STACK_SIZE); + int c_wait2 = mo_task_spawn(task_mutex_cond, DEFAULT_STACK_SIZE); + int c_wait3 = mo_task_spawn(task_mutex_cond, DEFAULT_STACK_SIZE); + mo_task_priority(c_wait1, TASK_PRIO_CRIT); + mo_task_priority(c_wait2, TASK_PRIO_CRIT); + mo_task_priority(c_wait3, TASK_PRIO_CRIT); + mo_task_yield(); /* Yield to condition wait task*/ + + /* Check condition wait task is in the waiting list and removed from the + * ready queue. */ + TEST_ASSERT(task_cnt_in_sched(0) == 1 && mo_cond_waiting_count(&cond) == 3, + " Condition wait dequeue successfully "); + + mo_cond_signal(&cond); + mo_task_yield(); + + /* Check condition wait task enqueued by signal. 
*/ + TEST_ASSERT(task_cnt_in_sched(0) == 2 && mo_cond_waiting_count(&cond) == 2, + " Condition wait enqueue successfully by signal "); + + /* Broadcast all condition tasks */ + mo_cond_broadcast(&cond); + TEST_ASSERT(task_cnt_in_sched(0) == 4 && mo_cond_waiting_count(&cond) == 0, + " Condition wait enqueue successfully by broadcast "); + + mo_task_cancel(c_wait1); + mo_task_cancel(c_wait2); + mo_task_cancel(c_wait3); + + /*--- Test 2: Mutex condition timed wait ---*/ + int c_t_wait1 = mo_task_spawn(task_mutex_cond_timewait, DEFAULT_STACK_SIZE); + mo_task_priority(c_t_wait1, TASK_PRIO_CRIT); + mo_task_yield(); + + TEST_ASSERT(task_cnt_in_sched(0) == 1 && mo_cond_waiting_count(&cond) == 1, + " Condition timed wait dequeue successfully "); + + /* Suspend controller task, waiting for condition timed task wake up by + * timeout and resume controller task*/ + mo_task_suspend(test_controller); + + /* Check waked up task enqueue */ + TEST_ASSERT(task_cnt_in_sched(0) == 2 && mo_cond_waiting_count(&cond) == 0, + " Condition timed wait enqueue successfully by timeout "); + mo_task_cancel(c_t_wait1); +} + +/* Print test results */ +void print_test_results(void) +{ + printf("\n=== Test Results ===\n"); + printf("Tests passed: %d\n", tests_passed); + printf("Tests failed: %d\n", tests_failed); + printf("Total tests: %d\n", tests_passed + tests_failed); + + if (tests_failed == 0) { + printf("All tests PASSED!\n"); + } else { + printf("Some tests FAILED!\n"); + } +} + +void schedule_test_task(void) +{ + printf("Starting RR-cursor based scheduler test suits...\n"); + + mo_logger_flush(); + + test_bitmap(); + test_cursor(); + test_normal_state_transition(); + test_sem_block_state_transition(); + test_mutex(); + test_mutex_cond(); + + print_test_results(); + printf("RR-cursor based scheduler tests completed successfully.\n"); + + mo_logger_async_resume(); + /* Test complete - go into low-activity mode */ + while (1) + mo_task_wfi(); +} + +int32_t app_main(void) +{ + int idle_id = mo_task_spawn(task_idle, DEFAULT_STACK_SIZE); + mo_task_priority(idle_id, TASK_PRIO_IDLE); + + test_controller = mo_task_spawn(schedule_test_task, DEFAULT_STACK_SIZE); + mo_task_priority(test_controller, TASK_PRIO_CRIT); + /* preemptive scheduling */ + return 1; +}