diff --git a/Makefile b/Makefile
index 23ef4d06..683e76d3 100644
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ deps += $(LIB_OBJS:%.o=%.o.d)
 APPS := coop echo hello mqueues semaphore mutex cond \
        pipes pipes_small pipes_struct prodcons progress \
        rtsched suspend test64 timer timer_kill \
-       cpubench test_libc
+       cpubench test_libc test_sched
 
 # Output files for __link target
 IMAGE_BASE := $(BUILD_DIR)/image
diff --git a/app/test_sched.c b/app/test_sched.c
new file mode 100644
index 00000000..7903738e
--- /dev/null
+++ b/app/test_sched.c
@@ -0,0 +1,517 @@
+/*
+ * Test suite for the RR-cursor based scheduler implementation.
+ *
+ * This suite verifies the consistency of the O(1) RR-cursor based scheduler
+ * data structures, including bitmap bit positions, ready queues, and the
+ * RR-cursors. It exercises all task states (TASK_READY, TASK_BLOCKED,
+ * TASK_SUSPENDED, and TASK_RUNNING) and also verifies correct handling
+ * of task cancellation.
+ *
+ * Semaphore and mutex operations are exercised as well, verifying that they
+ * align with the new scheduler design.
+ */
+
+#include <linmo.h>
+
+#include "private/error.h"
+
+#define TEST_ASSERT(condition, description) \
+    do { \
+        if (condition) { \
+            printf("PASS: %s\n", description); \
+            tests_passed++; \
+        } else { \
+            printf("FAIL: %s\n", description); \
+            tests_failed++; \
+        } \
+    } while (0)
+
+/* Test results tracking */
+static int tests_passed = 0;
+static int tests_failed = 0;
+
+/* Tasks and resources */
+int test_controller;
+sem_t *sem;
+mutex_t mutex;
+cond_t cond;
+
+void task_normal(void)
+{
+    for (;;) {
+        mo_task_wfi();
+    }
+}
+
+/* Task that suspends itself */
+void task_suspend(void)
+{
+    mo_task_suspend(mo_task_id());
+    while (1)
+        mo_task_wfi();
+}
+
+/* Task that delays (blocks) itself, then resumes the controller */
+void task_delay(void)
+{
+    mo_task_delay(1);
+    mo_task_resume(test_controller);
+    mo_task_yield();
+    while (1)
+        mo_task_wfi();
+}
+
+/* Mutex task; tries to acquire and release the lock */
+void task_mutex(void)
+{
+    mo_mutex_lock(&mutex);
+    mo_mutex_unlock(&mutex);
+    while (1) {
+        mo_task_wfi();
+    }
+}
+
+/* Timed mutex task; tries to acquire the lock before the timeout expires */
+void task_mutex_timedlock(void)
+{
+    uint32_t ticks = 10;
+    TEST_ASSERT(mo_mutex_timedlock(&mutex, ticks) == ERR_TIMEOUT,
+                " Mutex timedlock times out as expected ");
+    mo_task_resume(test_controller);
+    while (1) {
+        mo_task_wfi();
+    }
+}
+
+/* Condition wait task; waits until signaled */
+void task_mutex_cond(void)
+{
+    /* Acquire mutex */
+    mo_mutex_lock(&mutex);
+
+    /* Condition wait; enter the condition waiter list and release the mutex */
+    mo_cond_wait(&cond, &mutex);
+
+    /* Condition signaled; the lock is held again, release it */
+    mo_mutex_unlock(&mutex);
+
+    while (1) {
+        mo_task_wfi();
+    }
+}
+
+/* Condition timed-wait task; waits for a signal until the timeout expires */
+void task_mutex_cond_timewait(void)
+{
+    /* Acquire mutex */
+    mo_mutex_lock(&mutex);
+
+    /* Condition wait; enter the condition waiter list and release the mutex */
+    uint32_t ticks = 10;
+    TEST_ASSERT(mo_cond_timedwait(&cond, &mutex, ticks) == ERR_TIMEOUT,
+                " Condition timedwait times out as expected ");
+
+    /* Timed out; the lock is held again, release it */
+    mo_mutex_unlock(&mutex);
+    mo_task_resume(test_controller);
+
+    while (1) {
+        mo_task_wfi();
+    }
+}
+
+/* Semaphore task; waits on the semaphore */
+void task_sem(void)
+{
+    mo_sem_wait(sem);
+    while (1)
+        mo_task_wfi();
+}
+
+/* Idle task */
+void task_idle(void)
+{
+    while (1) {
+        mo_task_wfi();
+    }
+}
+
+/* Helpers for verification */
+
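+/* For illustration, the invariants these helpers check: with one ready task
+ * at level 0 (TASK_PRIO_CRIT) and two at the default level 4, the scheduler
+ * state is expected to satisfy:
+ *   kcb->ready_bitmap            == 0b00010001
+ *   kcb->ready_queues[0]->length == 1
+ *   kcb->ready_queues[4]->length == 2
+ *   kcb->rr_cursors[i] is NULL exactly when ready_queues[i] is empty
+ */
+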
+/* Bitmap check */
+static bool bit_in_bitmap(int prio)
+{
+    return ((kcb->ready_bitmap & (1U << prio))) ? true : false;
+}
+
+/* Task count check, via the ready-queue list length */
+static int32_t task_cnt_in_sched(int prio)
+{
+    return kcb->ready_queues[prio]->length;
+}
+
+/* Compare the id of every list node in the ready queue */
+static bool task_in_rq(int task_id, int prio)
+{
+    list_node_t *node = kcb->ready_queues[prio]->head->next;
+    while (node != kcb->ready_queues[prio]->tail) {
+        if (((tcb_t *) (node->data))->id == task_id)
+            return true;
+        node = node->next;
+    }
+    return false;
+}
+
+/* Test priority bitmap consistency across task lifecycle transitions:
+ * basic task creation, priority migration, and cancellation.
+ */
+void test_bitmap(void)
+{
+    printf("\n=== Testing Priority Bitmap Consistency ===\n");
+
+    /* task count = 1 after spawn → bitmap bit should be set */
+    int task_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE);
+    TEST_ASSERT(bit_in_bitmap(4) == true && task_cnt_in_sched(4) == 1,
+                "Bitmap sets bit when a task is spawned at that priority");
+
+    /* migrate task to a different priority queue → bitmap updates bits */
+    mo_task_priority(task_id, TASK_PRIO_HIGH);
+    TEST_ASSERT(bit_in_bitmap(2) == true && bit_in_bitmap(4) == false &&
+                    task_cnt_in_sched(2) == 1 && task_cnt_in_sched(4) == 0,
+                "Bitmap updates bits correctly after priority migration");
+
+    /* cancel task → ready queue becomes empty, bitmap bit should clear */
+    mo_task_cancel(task_id);
+    TEST_ASSERT(bit_in_bitmap(2) == false && task_cnt_in_sched(2) == 0,
+                "Bitmap clears bit when the migrated task is cancelled");
+}
+
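+/* For illustration, the priority migration above boils down to two bitmap
+ * updates inside the scheduler: bit 4 clears because its queue empties, and
+ * bit 2 sets because its queue becomes non-empty:
+ *   kcb->ready_bitmap &= ~(1U << 4);
+ *   kcb->ready_bitmap |= (1U << 2);
+ */
+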
+/* Test RR cursor consistency across task lifecycle transitions and
+ * task-count changes within a single priority queue.
+ *
+ * Cursor invariants for a ready queue:
+ * - task count == 0: cursor points to NULL
+ * - task count == 1: cursor points to the only task node
+ * - task count > 1: cursor points to a task node that differs from
+ *   the running task
+ *
+ * Scenarios:
+ * - Running task creates and cancels a same-priority task
+ * - Running task creates and cancels tasks in a different priority
+ *   queue
+ */
+void test_cursor(void)
+{
+    printf("\n=== Testing Cursor Consistency ===\n");
+
+    /* --- Test 1: Running task creates a same-priority task and cancels it ---
+     */
+
+    /* task count = 1, cursor should point to the only task node
+     * (controller, TASK_RUNNING)
+     */
+    TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[0]->data))->id == test_controller &&
+                    task_cnt_in_sched(0) == 1,
+                " Task count 1: Cursor points to the only task node");
+
+    /* task count from 1 -> 2, cursor points to a task node different
+     * from the running task
+     */
+    int task_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE);
+    mo_task_priority(task_id, TASK_PRIO_CRIT);
+    TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[0]->data))->id == task_id &&
+                    task_cnt_in_sched(0) == 2,
+                " Task count 1->2: Cursor advances from the running task to "
+                "the new task node ");
+
+    /* cancel the cursor (new) task, task count from 2 -> 1; the cursor should
+     * move to the next task (controller)
+     */
+    mo_task_cancel(task_id);
+    TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[0]->data))->id == test_controller &&
+                    task_cnt_in_sched(0) == 1,
+                " Task count 2->1: Cursor moves from the removed node to the "
+                "next task (controller) ");
+
+
+    /* --- Test 2: Running task creates a different-priority task and cancels
+     * it --- */
+
+    /* task count = 0 */
+    TEST_ASSERT(kcb->rr_cursors[4] == NULL && task_cnt_in_sched(4) == 0,
+                "Task count 0: Cursor is NULL when the ready queue is empty");
+
+    /* task count from 0 -> 1 */
+    int task1_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE);
+    TEST_ASSERT(
+        ((tcb_t *) (kcb->rr_cursors[4]->data))->id == task1_id &&
+            task_cnt_in_sched(4) == 1,
+        "Task count 0->1: Cursor initialized and points to the new task");
+
+    /* task count from 1 -> 2, cursor does not need to advance */
+    int task2_id = mo_task_spawn(task_normal, DEFAULT_STACK_SIZE);
+    TEST_ASSERT(((tcb_t *) (kcb->rr_cursors[4]->data))->id == task1_id &&
+                    task_cnt_in_sched(4) == 2,
+                "Task count 1->2: Cursor is kept when it does not point to "
+                "the running task ");
+
+    /* task count from 2 -> 1, cancel the cursor task */
+    mo_task_cancel(task1_id);
+    TEST_ASSERT(
+        ((tcb_t *) (kcb->rr_cursors[4]->data))->id == task2_id &&
+            task_cnt_in_sched(4) == 1,
+        "Task count 2->1: Cursor advances when the cursor task is cancelled ");
+
+    /* task count from 1 -> 0 */
+    mo_task_cancel(task2_id);
+    TEST_ASSERT(kcb->rr_cursors[4] == NULL && task_cnt_in_sched(4) == 0,
+                "Task count 1->0: Cursor is NULL when the ready queue becomes "
+                "empty again");
+}
+
+/* Task state transition API tests, covering APIs in task.c, semaphore.c and
+ * mutex.c */
+
+/* Test ready queue consistency with state transition APIs - normal case */
+void test_normal_state_transition(void)
+{
+    printf("\n=== Testing APIs normal task state transition ===\n");
+
+    /* --- Test 1: State transition from TASK_READY --- */
+
+    /* TASK_STOPPED to TASK_READY */
+    int suspend_task = mo_task_spawn(task_suspend, DEFAULT_STACK_SIZE);
+    TEST_ASSERT(task_in_rq(suspend_task, 4) && task_cnt_in_sched(4) == 1,
+                " Enqueue successfully: TASK_STOPPED -> TASK_READY");
+
+    /* TASK_READY to TASK_SUSPENDED */
+    mo_task_suspend(suspend_task);
+    TEST_ASSERT(!task_in_rq(suspend_task, 4) && task_cnt_in_sched(4) == 0,
+                " Dequeue successfully: TASK_READY -> TASK_SUSPENDED");
+
+    /* TASK_SUSPENDED to TASK_READY */
+    mo_task_resume(suspend_task);
+    TEST_ASSERT(task_in_rq(suspend_task, 4) && task_cnt_in_sched(4) == 1,
+                " Enqueue successfully: TASK_SUSPENDED -> TASK_READY");
+
+    /* --- Test 2: State transition from TASK_RUNNING --- */
+
+    /* When the suspend task executes (TASK_RUNNING), it suspends itself
+     * (TASK_SUSPENDED) */
+    mo_task_priority(suspend_task, TASK_PRIO_CRIT);
+    mo_task_yield();
+
+    /* The suspended task should not be in the ready queue */
+    TEST_ASSERT(!task_in_rq(suspend_task, 0) && task_cnt_in_sched(0) == 1,
+                " Dequeue successfully: TASK_RUNNING -> TASK_SUSPENDED");
+
+    /* Resume the suspended task; it will be enqueued again */
+    mo_task_resume(suspend_task);
+    TEST_ASSERT(task_in_rq(suspend_task, 0) && task_cnt_in_sched(0) == 2,
+                " Enqueue successfully: TASK_SUSPENDED -> TASK_READY");
+
+    mo_task_cancel(suspend_task);
+
+    /* --- Test 3: Delay task test (TASK_RUNNING) -> (TASK_BLOCKED) --- */
+
+    int delay_id = mo_task_spawn(task_delay, DEFAULT_STACK_SIZE);
+    mo_task_priority(delay_id, TASK_PRIO_CRIT);
+
+    /* Yield to the delay task; it blocks itself and control returns to the
+     * controller */
+    mo_task_yield();
+    TEST_ASSERT(!task_in_rq(delay_id, 0) && task_cnt_in_sched(0) == 1,
+                " Dequeue successfully: TASK_RUNNING -> TASK_BLOCKED (delay)");
+
+    /* Suspend the controller; the delay task resumes it after waking up from
+     * TASK_BLOCKED */
+    mo_task_suspend(test_controller);
+    TEST_ASSERT(task_cnt_in_sched(0) == 2,
+                " Enqueue successfully: TASK_BLOCKED (delay) -> TASK_READY");
+
+    mo_task_cancel(delay_id);
+}
+
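+/* Summary of the mapping exercised above between state transitions and
+ * ready-queue operations:
+ *   TASK_READY     -> TASK_SUSPENDED : dequeued, queue length decreases
+ *   TASK_SUSPENDED -> TASK_READY     : enqueued, queue length increases
+ *   TASK_RUNNING   -> TASK_BLOCKED   : dequeued until the delay expires
+ */
+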
+/* Test ready queue consistency with state transition APIs - semaphore case */
+void test_sem_block_state_transition(void)
+{
+    printf("\n=== Testing Semaphore ===\n");
+
+    sem = mo_sem_create(1, 1);
+    mo_sem_wait(sem);
+
+    /* Create the semaphore task and yield the controller to it; the semaphore
+     * task goes from TASK_RUNNING to TASK_BLOCKED (waiting for a resource) */
+    int sem_id = mo_task_spawn(task_sem, DEFAULT_STACK_SIZE);
+    mo_task_priority(sem_id, TASK_PRIO_CRIT);
+    mo_task_yield();
+    TEST_ASSERT(
+        task_cnt_in_sched(0) == 1 && mo_sem_waiting_count(sem) == 1,
+        " Semaphore task dequeues successfully when no resource is available ");
+
+    /* The controller releases a resource; the semaphore task goes from
+     * TASK_BLOCKED to TASK_READY */
+    mo_sem_signal(sem);
+    mo_task_yield();
+    TEST_ASSERT(
+        task_cnt_in_sched(0) == 2 && mo_sem_waiting_count(sem) == 0,
+        " Semaphore task enqueues successfully when a resource is available ");
+    mo_sem_destroy(sem);
+    mo_task_cancel(sem_id);
+}
+
+void test_mutex(void)
+{
+    printf("\n=== Testing Mutex ===\n");
+
+    /* --- Test 1: Mutex lock and unlock --- */
+
+    /* Create a mutex lock task */
+    mo_mutex_init(&mutex);
+    int mutex_id = mo_task_spawn(task_mutex, DEFAULT_STACK_SIZE);
+    mo_task_priority(mutex_id, TASK_PRIO_CRIT);
+
+    /* The controller acquires the mutex, then yields to the mutex task. The
+     * mutex task tries to acquire the lock held by the controller and blocks
+     * itself */
+    mo_mutex_lock(&mutex);
+    mo_task_yield();
+
+    /* The mutex task has blocked itself; control returns to the controller */
+    TEST_ASSERT(
+        task_cnt_in_sched(0) == 1 && mo_mutex_waiting_count(&mutex) == 1,
+        " Mutex task dequeues successfully when the lock is not available ");
+
+    /* The controller releases the mutex; the mutex task is re-enqueued */
+    mo_mutex_unlock(&mutex);
+    mo_task_yield();
+    TEST_ASSERT(
+        task_cnt_in_sched(0) == 2 && mo_mutex_waiting_count(&mutex) == 0,
+        " Mutex task enqueues successfully when the mutex is released by the "
+        "controller");
+    mo_task_cancel(mutex_id);
+
+    /* --- Test 2: Mutex lock timeout --- */
+
+    mo_mutex_lock(&mutex);
+    mutex_id = mo_task_spawn(task_mutex_timedlock, DEFAULT_STACK_SIZE);
+    mo_task_priority(mutex_id, TASK_PRIO_CRIT);
+
+    mo_task_yield();
+    /* The timed task has blocked itself: the ready-queue task count decreases
+     * and the mutex waiting count increases */
+    TEST_ASSERT(
+        task_cnt_in_sched(0) == 1 && mo_mutex_waiting_count(&mutex) == 1,
+        " Timed mutex task dequeues successfully when the lock is not "
+        "available ");
+    /* The controller suspends itself; the timed task resumes it when the
+     * timeout fires */
+    mo_task_suspend(mo_task_id());
+    mo_mutex_unlock(&mutex);
+
+    TEST_ASSERT(
+        task_cnt_in_sched(0) == 2 && mo_mutex_waiting_count(&mutex) == 0,
+        " Timed mutex task enqueues successfully on timeout ");
+    mo_task_cancel(mutex_id);
+}
+
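+/* Note: mo_mutex_timedlock() in kernel/mutex.c distinguishes the two wake-up
+ * causes by waiter-list membership: an unlock hand-off removes the waiter
+ * from m->waiters before waking it, while a timeout wake-up leaves it there,
+ * so after mo_task_yield():
+ *   remove_self_from_waiters() succeeds -> ERR_TIMEOUT
+ *   remove_self_from_waiters() fails    -> ownership was granted
+ */
+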
+void test_mutex_cond(void)
+{
+    printf("\n=== Testing Mutex Condition ===\n");
+
+    /* --- Test 1: Mutex condition wait --- */
+    mo_cond_init(&cond);
+    /* Spawn condition-wait tasks */
+    int c_wait1 = mo_task_spawn(task_mutex_cond, DEFAULT_STACK_SIZE);
+    int c_wait2 = mo_task_spawn(task_mutex_cond, DEFAULT_STACK_SIZE);
+    int c_wait3 = mo_task_spawn(task_mutex_cond, DEFAULT_STACK_SIZE);
+    mo_task_priority(c_wait1, TASK_PRIO_CRIT);
+    mo_task_priority(c_wait2, TASK_PRIO_CRIT);
+    mo_task_priority(c_wait3, TASK_PRIO_CRIT);
+    mo_task_yield(); /* Yield to the condition-wait tasks */
+
+    /* Check that the condition-wait tasks are on the waiter list and removed
+     * from the ready queue. */
+    TEST_ASSERT(task_cnt_in_sched(0) == 1 && mo_cond_waiting_count(&cond) == 3,
+                " Condition waiters dequeue successfully ");
+
+    mo_cond_signal(&cond);
+    mo_task_yield();
+
+    /* Check that one condition-wait task is enqueued by the signal. */
+    TEST_ASSERT(task_cnt_in_sched(0) == 2 && mo_cond_waiting_count(&cond) == 2,
+                " Condition waiter enqueues successfully on signal ");
+
+    /* Broadcast to all condition tasks */
+    mo_cond_broadcast(&cond);
+    TEST_ASSERT(task_cnt_in_sched(0) == 4 && mo_cond_waiting_count(&cond) == 0,
+                " Condition waiters enqueue successfully on broadcast ");
+
+    mo_task_cancel(c_wait1);
+    mo_task_cancel(c_wait2);
+    mo_task_cancel(c_wait3);
+
+    /* --- Test 2: Mutex condition timed wait --- */
+    int c_t_wait1 = mo_task_spawn(task_mutex_cond_timewait, DEFAULT_STACK_SIZE);
+    mo_task_priority(c_t_wait1, TASK_PRIO_CRIT);
+    mo_task_yield();
+
+    TEST_ASSERT(task_cnt_in_sched(0) == 1 && mo_cond_waiting_count(&cond) == 1,
+                " Condition timed wait dequeues successfully ");
+
+    /* Suspend the controller task and wait for the timed-wait task to wake up
+     * on timeout and resume it */
+    mo_task_suspend(test_controller);
+
+    /* Check that the woken task is enqueued */
+    TEST_ASSERT(task_cnt_in_sched(0) == 2 && mo_cond_waiting_count(&cond) == 0,
+                " Condition timed wait enqueues successfully on timeout ");
+    mo_task_cancel(c_t_wait1);
+}
+
+/* Print test results */
+void print_test_results(void)
+{
+    printf("\n=== Test Results ===\n");
+    printf("Tests passed: %d\n", tests_passed);
+    printf("Tests failed: %d\n", tests_failed);
+    printf("Total tests: %d\n", tests_passed + tests_failed);
+
+    if (tests_failed == 0) {
+        printf("All tests PASSED!\n");
+    } else {
+        printf("Some tests FAILED!\n");
+    }
+}
+
+void schedule_test_task(void)
+{
+    printf("Starting RR-cursor based scheduler test suite...\n");
+
+    mo_logger_flush();
+
+    test_bitmap();
+    test_cursor();
+    test_normal_state_transition();
+    test_sem_block_state_transition();
+    test_mutex();
+    test_mutex_cond();
+
+    print_test_results();
+    printf("RR-cursor based scheduler tests completed successfully.\n");
+
+    mo_logger_async_resume();
+    /* Test complete - go into low-activity mode */
+    while (1)
+        mo_task_wfi();
+}
+
+int32_t app_main(void)
+{
+    int idle_id = mo_task_spawn(task_idle, DEFAULT_STACK_SIZE);
+    mo_task_priority(idle_id, TASK_PRIO_IDLE);
+
+    test_controller = mo_task_spawn(schedule_test_task, DEFAULT_STACK_SIZE);
+    mo_task_priority(test_controller, TASK_PRIO_CRIT);
+    /* preemptive scheduling */
+    return 1;
+}
diff --git a/include/lib/list.h b/include/lib/list.h
index 298e6c83..ce791a2b 100644
--- a/include/lib/list.h
+++ b/include/lib/list.h
@@ -100,6 +100,24 @@ static inline list_node_t *list_pushback(list_t *list, void *data)
     return node;
 }
 
+/* Push an existing node onto the back of the list (no allocation) */
+static inline void list_pushback_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || target->next))
+        return;
+
+    target->next = list->tail;
+
+    /* Insert before tail sentinel */
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail)
+        prev = prev->next;
+
+    prev->next = target;
+    list->length++;
+    return;
+}
+
 static inline void *list_pop(list_t *list)
 {
     if (unlikely(list_is_empty(list)))
@@ -134,6 +152,25 @@ static inline void *list_remove(list_t *list, list_node_t *target)
     return data;
 }
 
+/* Remove a node from the list without freeing it */
+static inline void list_remove_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || list_is_empty(list)))
+        return;
+
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail && prev->next != target)
+        prev = prev->next;
+
+    if (unlikely(prev->next != target))
+        return; /* node not found */
+
+    prev->next = target->next;
+    target->next = NULL;
+    list->length--;
+    return;
+}
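+
+/* Usage sketch: together these two helpers let an embedded node (such as a
+ * task's rq_node) migrate between lists without touching the allocator;
+ * old_queue/new_queue are illustrative names:
+ *
+ *   list_remove_node(old_queue, &task->rq_node);
+ *   list_pushback_node(new_queue, &task->rq_node);
+ *
+ * Both helpers walk from the head to find the predecessor, so each call is
+ * O(n) in the length of the (typically short) list.
+ */
+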
 /* Iteration */
 
 /* Callback should return non-NULL to stop early, NULL to continue */
diff --git a/include/sys/task.h b/include/sys/task.h
index 0d3aaa4d..cc8d2570 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -86,6 +86,10 @@ typedef struct tcb {
 
     /* Stack Protection */
     uint32_t canary; /* Random stack canary for overflow detection */
+
+    /* State transition support */
+    /* Ready queue membership node (only one per task) */
+    list_node_t rq_node;
 } tcb_t;
 
 /* Kernel Control Block (KCB)
@@ -108,14 +112,18 @@ typedef struct {
     /* Timer Management */
     list_t *timer_list;      /* List of active software timers */
     volatile uint32_t ticks; /* Global system tick, incremented by timer */
+
+    /* Scheduling attributes */
+    uint8_t ready_bitmap; /* 8-bit priority bitmap */
+    list_t
+        *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */
+    list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */
 extern kcb_t *kcb;
 
 /* System Configuration Constants */
-#define SCHED_IMAX \
-    500 /* Safety limit for scheduler iterations to prevent livelock */
 #define MIN_TASK_STACK_SIZE \
     256 /* Minimum stack size to prevent stack overflow */
 #define TASK_CACHE_SIZE \
@@ -288,6 +296,14 @@ uint64_t mo_uptime(void);
  */
 void _sched_block(queue_t *wait_q);
 
+/* Dequeue path for a task entering the TASK_BLOCKED state. It must be called
+ * after the task state has been set to TASK_BLOCKED. Currently this API is
+ * used on the mutex lock path. */
+void _sched_block_dequeue(tcb_t *blocked_task);
+
+/* Enqueue path for a task leaving the TASK_BLOCKED state. This API is the
+ * main enqueue path for semaphore and mutex operations. */
+void _sched_block_enqueue(tcb_t *blocked_task);
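+
+/* Illustrative call sequence (a sketch of the mutex blocking path): the
+ * blocking side marks the task BLOCKED before dequeuing it, and the wake
+ * side re-enqueues it, which also restores TASK_READY:
+ *
+ *   self->state = TASK_BLOCKED;
+ *   _sched_block_dequeue(self);
+ *   _yield();
+ *   ...
+ *   _sched_block_enqueue(waiter);
+ */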
+
 /* Application Entry Point */
 
 /* The main entry point for the user application.
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5ff9c8aa..e8747a27 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -78,6 +78,9 @@ static void mutex_block_atomic(list_t *waiters)
 
     /* Block and yield atomically */
     self->state = TASK_BLOCKED;
+
+    /* Explicitly remove the list node from the ready queue */
+    _sched_block_dequeue(self);
     _yield(); /* This releases NOSCHED when we context switch */
 }
 
@@ -227,26 +230,21 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
     /* Set up timeout using task delay mechanism */
     self->delay = ticks;
     self->state = TASK_BLOCKED;
+    _sched_block_dequeue(self);
     NOSCHED_LEAVE();
 
     /* Yield and let the scheduler handle timeout via delay mechanism */
     mo_task_yield();
 
-    /* Check result after waking up */
+    /* Check result after waking up; the task is TASK_RUNNING and back in the
+     * ready queue */
     int32_t result;
     NOSCHED_ENTER();
 
-    if (self->state == TASK_BLOCKED) {
-        /* We woke up due to timeout, not mutex unlock */
-        if (remove_self_from_waiters(m->waiters)) {
-            self->state = TASK_READY;
-            result = ERR_TIMEOUT;
-        } else {
-            /* Race condition: we were both timed out and unlocked */
-            /* Check if we now own the mutex */
-            result = (m->owner_tid == self_tid) ? ERR_OK : ERR_TIMEOUT;
-        }
+    /* If the task is still on the waiter list, it woke up due to timeout. */
+    if (remove_self_from_waiters(m->waiters)) {
+        result = ERR_TIMEOUT;
     } else {
         /* We were woken by mutex unlock - check ownership */
         result = (m->owner_tid == self_tid) ? ERR_OK : ERR_FAIL;
@@ -282,7 +280,7 @@ int32_t mo_mutex_unlock(mutex_t *m)
     /* Validate task state before waking */
     if (likely(next_owner->state == TASK_BLOCKED)) {
         m->owner_tid = next_owner->id;
-        next_owner->state = TASK_READY;
+        _sched_block_enqueue(next_owner);
         /* Clear any pending timeout since we're granting ownership */
         next_owner->delay = 0;
     } else {
@@ -387,6 +385,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
         panic(ERR_SEM_OPERATION);
     }
     self->state = TASK_BLOCKED;
+    _sched_block_dequeue(self);
     NOSCHED_LEAVE();
 
     /* Release mutex */
@@ -395,7 +394,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
         /* Failed to unlock - remove from wait list and restore state */
         NOSCHED_ENTER();
         remove_self_from_waiters(c->waiters);
-        self->state = TASK_READY;
+        _sched_block_enqueue(self);
         NOSCHED_LEAVE();
         return unlock_result;
     }
@@ -430,6 +429,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
     }
     self->delay = ticks;
     self->state = TASK_BLOCKED;
+    _sched_block_dequeue(self);
     NOSCHED_LEAVE();
 
     /* Release mutex */
@@ -438,7 +438,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
         /* Failed to unlock - cleanup and restore */
         NOSCHED_ENTER();
         remove_self_from_waiters(c->waiters);
-        self->state = TASK_READY;
+        _sched_block_enqueue(self);
         self->delay = 0;
         NOSCHED_LEAVE();
         return unlock_result;
@@ -447,14 +447,13 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
     /* Yield and wait for signal or timeout */
     mo_task_yield();
 
-    /* Determine why we woke up */
+    /* Determine why we woke up; the task is TASK_RUNNING and back in the
+     * ready queue. */
    int32_t wait_status;
     NOSCHED_ENTER();
 
-    if (self->state == TASK_BLOCKED) {
-        /* Timeout occurred - remove from wait list */
-        remove_self_from_waiters(c->waiters);
-        self->state = TASK_READY;
+    /* Timeout occurred - remove from wait list */
+    if (remove_self_from_waiters(c->waiters)) {
         self->delay = 0;
         wait_status = ERR_TIMEOUT;
     } else {
@@ -483,7 +482,7 @@ int32_t mo_cond_signal(cond_t *c)
     if (likely(waiter)) {
         /* Validate task state before waking */
         if (likely(waiter->state == TASK_BLOCKED)) {
-            waiter->state = TASK_READY;
+            _sched_block_enqueue(waiter);
             /* Clear any pending timeout since we're signaling */
             waiter->delay = 0;
         } else {
@@ -510,7 +509,7 @@ int32_t mo_cond_broadcast(cond_t *c)
         if (likely(waiter)) {
             /* Validate task state before waking */
             if (likely(waiter->state == TASK_BLOCKED)) {
-                waiter->state = TASK_READY;
+                _sched_block_enqueue(waiter);
                 /* Clear any pending timeout since we're broadcasting */
                 waiter->delay = 0;
             } else {
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 640e372d..9fffa679 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s)
     if (likely(awakened_task)) {
         /* Validate awakened task state consistency */
         if (likely(awakened_task->state == TASK_BLOCKED)) {
-            awakened_task->state = TASK_READY;
+            _sched_block_enqueue(awakened_task);
             should_yield = true;
         } else {
             /* Task state inconsistency - this should not happen */
diff --git a/kernel/task.c b/kernel/task.c
index 1304a1ee..2b4109fb 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -26,6 +26,9 @@ static kcb_t kernel_state = {
     .task_count = 0,
     .ticks = 0,
     .preemptive = true, /* Default to preemptive mode */
+    .ready_bitmap = 0,
+    .ready_queues = {NULL},
+    .rr_cursors = {NULL},
 };
 kcb_t *kcb = &kernel_state;
 
@@ -71,7 +74,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = {
     TASK_TIMESLICE_IDLE /* Priority 7: Idle */
 };
 
-/* Mark task as ready (state-based) */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task);
 
 /* Utility and Validation Functions */
@@ -354,29 +357,64 @@ void _yield(void) __attribute__((weak, alias("yield")));
  * practical performance with strong guarantees for fairness and reliability.
  */
 
-/* Add task to ready state - simple state-based approach */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
+    uint8_t prio_level = task->prio_level;
+
     /* Ensure task has appropriate time slice for its priority */
-    task->time_slice = get_priority_timeslice(task->prio_level);
+    task->time_slice = get_priority_timeslice(prio_level);
     task->state = TASK_READY;
 
-    /* Task selection is handled directly through the master task list */
+    list_t **rq = &kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    if (!*rq)
+        *rq = list_create();
+
+    list_pushback_node(*rq, &task->rq_node);
+
+    /* Set up the first rr_cursor */
+    if (!*cursor)
+        *cursor = &task->rq_node;
+
+    /* Advance the cursor when it points at the running task */
+    if (*cursor == kcb->task_current)
+        *cursor = &task->rq_node;
+
+    kcb->ready_bitmap |= (1U << prio_level);
+    return;
 }
 
-/* Remove task from ready queues - state-based approach for compatibility */
+/* Remove task from its ready queue */
 void sched_dequeue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* For tasks that need to be removed from ready state (suspended/cancelled),
-     * we rely on the state change. The scheduler will skip non-ready tasks
-     * when it encounters them during the round-robin traversal.
-     */
+    uint8_t prio_level = task->prio_level;
+
+    /* A task leaving the ready/running state must be removed from its
+     * corresponding ready queue. */
+    list_t *rq = kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    /* Safely move the cursor to the next task node. */
+    if (&task->rq_node == *cursor)
+        *cursor = list_cnext(rq, *cursor);
+
+    /* Remove the ready queue node */
+    list_remove_node(rq, &task->rq_node);
+
+    /* If the queue became empty, clear the cursor and the bitmap bit */
+    if (rq->length == 0) {
+        *cursor = NULL;
+        kcb->ready_bitmap &= ~(1U << prio_level);
+    }
+    return;
 }
 
 /* Handle time slice expiration for current task */
@@ -401,36 +439,47 @@ void sched_tick_current_task(void)
     }
 }
 
-/* Task wakeup - simple state transition approach */
+/* Task wakeup: enqueue the task into its ready queue */
 void sched_wakeup_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* Mark task as ready - scheduler will find it during round-robin traversal
-     */
-    if (task->state != TASK_READY) {
-        task->state = TASK_READY;
-        /* Ensure task has time slice */
-        if (task->time_slice == 0)
-            task->time_slice = get_priority_timeslice(task->prio_level);
-    }
+    /* Enqueue task into ready queue */
+    if (task->state != TASK_READY && task->state != TASK_RUNNING)
+        sched_enqueue_task(task);
+}
+
+/* Dequeue a blocked task (e.g., when it blocks on a mutex) */
+void _sched_block_dequeue(tcb_t *blocked_task)
+{
+    if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED))
+        return;
+
+    sched_dequeue_task(blocked_task);
+}
+
+void _sched_block_enqueue(tcb_t *blocked_task)
+{
+    if (unlikely(!blocked_task || blocked_task->state != TASK_BLOCKED))
+        return;
+
+    sched_enqueue_task(blocked_task);
 }
 
-/* Efficient Round-Robin Task Selection with O(n) Complexity
+/* Efficient Round-Robin Task Selection with O(1) Complexity
  *
- * Selects the next ready task using circular traversal of the master task list.
+ * Selects the next ready task from the highest-priority non-empty ready queue.
  *
- * Complexity: O(n) where n = number of tasks
- * - Best case: O(1) when next task in sequence is ready
- * - Worst case: O(n) when only one task is ready and it's the last checked
- * - Typical case: O(k) where k << n (number of non-ready tasks to skip)
+ * Complexity: O(1)
+ * - Always constant-time selection, regardless of total task count.
+ * - No need to traverse the task list.
  *
 * Performance characteristics:
- * - Excellent for small-to-medium task counts (< 50 tasks)
- * - Simple and reliable implementation
- * - Good cache locality due to sequential list traversal
- * - Priority-aware time slice allocation
+ * - Ideal for systems with frequent context switches or many tasks.
+ * - Excellent cache locality: only touches nodes in the active ready queue.
+ * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup.
+ * - Each priority level maintains its own rr_cursor to ensure fair rotation.
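+ *
+ * Example: with ready_bitmap == 0b00010100, levels 2 and 4 are ready; the
+ * scan below stops at bit 2, the task under rr_cursors[2] is selected, and
+ * the cursor advances circularly to the next node in that queue.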
  */
 uint16_t sched_select_next_task(void)
 {
@@ -443,53 +492,39 @@ uint16_t sched_select_next_task(void)
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
-    /* Round-robin search: find next ready task in the master task list */
-    list_node_t *start_node = kcb->task_current;
-    list_node_t *node = start_node;
-    int iterations = 0; /* Safety counter to prevent infinite loops */
+    /* Bitmap search, from bit 0 (highest priority level) to bit 7 (lowest
+     * priority level) */
+    uint8_t bitmap = kcb->ready_bitmap;
+    if (unlikely(bitmap == 0))
+        panic(ERR_NO_TASKS);
 
-    do {
-        /* Move to next task (circular) */
-        node = list_cnext(kcb->tasks, node);
-        if (!node || !node->data)
-            continue;
+    uint8_t top_prio_level = 0;
+    while (top_prio_level < TASK_PRIORITY_LEVELS) {
+        if (bitmap & 1U)
+            break;
 
-        tcb_t *task = node->data;
+        bitmap >>= 1;
+        top_prio_level++;
+    }
 
-        /* Skip non-ready tasks */
-        if (task->state != TASK_READY)
-            continue;
+    list_node_t **cursor = &kcb->rr_cursors[top_prio_level];
+    list_t *rq = kcb->ready_queues[top_prio_level];
+    if (unlikely(!rq || !*cursor))
+        panic(ERR_NO_TASKS);
 
-        /* Found a ready task */
-        kcb->task_current = node;
-        task->state = TASK_RUNNING;
-        task->time_slice = get_priority_timeslice(task->prio_level);
+    /* Select the next task via the top-priority cursor */
+    kcb->task_current = *cursor;
 
-        return task->id;
+    /* Advance the top-priority cursor to the next task node */
+    *cursor = list_cnext(rq, *cursor);
 
-    } while (node != start_node && ++iterations < SCHED_IMAX);
+    /* Update the new task's properties */
+    tcb_t *new_task = kcb->task_current->data;
+    new_task->time_slice = get_priority_timeslice(new_task->prio_level);
+    new_task->state = TASK_RUNNING;
 
-    /* No ready tasks found in preemptive mode - all tasks are blocked.
-     * This is normal for periodic RT tasks waiting for their next period.
-     * We CANNOT return a BLOCKED task as that would cause it to run.
-     * Instead, find ANY task (even blocked) as a placeholder, then wait for
-     * interrupt.
-     */
-    if (kcb->preemptive) {
-        /* Select any task as placeholder (dispatcher won't actually switch to
-         * it if blocked) */
-        list_node_t *any_node = list_next(kcb->tasks->head);
-        while (any_node && any_node != kcb->tasks->tail) {
-            if (any_node->data) {
-                kcb->task_current = any_node;
-                tcb_t *any_task = any_node->data;
-                return any_task->id;
-            }
-            any_node = list_next(any_node);
-        }
-        /* No tasks at all - this is a real error */
-        panic(ERR_NO_TASKS);
-    }
+    if (kcb->task_current)
+        return new_task->id;
 
     /* In cooperative mode, having no ready tasks is an error */
     panic(ERR_NO_TASKS);
@@ -770,8 +805,15 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
+    /* Bind the ready-queue node to its task */
+    tcb->rq_node.data = tcb;
+    tcb->rq_node.next = NULL;
+
     if (!kcb->task_current)
-        kcb->task_current = node;
+        kcb->task_current = &tcb->rq_node;
+
+    /* Push the node into the ready queue */
+    sched_enqueue_task(tcb);
 
     CRITICAL_LEAVE();
 
@@ -791,7 +833,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     /* Add to cache and mark ready */
     cache_task(tcb->id, tcb);
-    sched_enqueue_task(tcb);
 
     return tcb->id;
 }
@@ -826,6 +867,10 @@ int32_t mo_task_cancel(uint16_t id)
         }
     }
 
+    /* Remove from the ready queue */
+    if (tcb->state == TASK_READY)
+        sched_dequeue_task(tcb);
+
     CRITICAL_LEAVE();
 
     /* Free memory outside critical section */
@@ -855,7 +900,9 @@ void mo_task_delay(uint16_t ticks)
 
     tcb_t *self = kcb->task_current->data;
 
-    /* Set delay and blocked state - scheduler will skip blocked tasks */
+    /* Set delay and blocked state, and dequeue from the ready queue */
+    sched_dequeue_task(self);
+
     self->delay = ticks;
     self->state = TASK_BLOCKED;
     NOSCHED_LEAVE();
@@ -882,6 +929,11 @@ int32_t mo_task_suspend(uint16_t id)
         return ERR_TASK_CANT_SUSPEND;
     }
 
+    /* Remove the task node from the ready queue if the task is in it
+     * (TASK_RUNNING/TASK_READY). */
+    if (task->state == TASK_READY || task->state == TASK_RUNNING)
+        sched_dequeue_task(task);
+
     task->state = TASK_SUSPENDED;
     bool is_current = (kcb->task_current->data == task);
 
@@ -910,9 +962,8 @@ int32_t mo_task_resume(uint16_t id)
         CRITICAL_LEAVE();
         return ERR_TASK_CANT_RESUME;
     }
-
-    /* mark as ready - scheduler will find it */
-    task->state = TASK_READY;
+    /* Enqueue the resumed task into its ready queue */
+    sched_enqueue_task(task);
 
     CRITICAL_LEAVE();
     return ERR_OK;
@@ -936,12 +987,30 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
         return ERR_TASK_NOT_FOUND;
     }
 
+    bool is_current = (kcb->task_current->data == task);
+
+    /* Re-queue: remove the task from its old priority queue, update its
+     * priority, then enqueue it into the new queue */
+    if (task->state == TASK_RUNNING || task->state == TASK_READY) {
+        sched_dequeue_task(task);
+
+        /* Update the new properties */
+        task->prio = priority;
+        task->prio_level = extract_priority_level(priority);
+
+        /* Enqueue the task node into the new priority's ready queue */
+        sched_enqueue_task(task);
+    }
+
     /* Update priority and level */
     task->prio = priority;
     task->prio_level = extract_priority_level(priority);
     task->time_slice = get_priority_timeslice(task->prio_level);
 
     CRITICAL_LEAVE();
+
+    if (is_current)
+        mo_task_yield();
+
     return ERR_OK;
 }
 
@@ -1034,6 +1103,9 @@ void _sched_block(queue_t *wait_q)
 
     tcb_t *self = kcb->task_current->data;
 
+    /* Remove the node from the ready queue */
+    sched_dequeue_task(self);
+
     if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);