From 67d15d8e7cf3e3fb8dec639dfbf143df9225ce3a Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 26 Nov 2025 18:48:21 +0800
Subject: [PATCH] Move mutex_block_atomic() to task.c

mutex_block_atomic() performs task state transitions and enqueues the
current task into a waiting queue. Its behavior is analogous to
_sched_block(), which is used by semaphore blocking and serves the same
purpose of transitioning a task into a blocked state. For consistency,
both task-blocking primitives should be managed under task.c.

This commit moves mutex_block_atomic() to task.c and adds its prototype
to task.h for use by mutex.c.

No functional changes are introduced.
---
 include/sys/task.h |  9 +++++++++
 kernel/mutex.c     | 18 ------------------
 kernel/task.c      | 19 +++++++++++++++++++
 3 files changed, 28 insertions(+), 18 deletions(-)

diff --git a/include/sys/task.h b/include/sys/task.h
index 0d3aaa4d..74fbd8fd 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -288,6 +288,15 @@ uint64_t mo_uptime(void);
  */
 void _sched_block(queue_t *wait_q);
 
+/* Atomically blocks the current task and invokes the scheduler.
+ *
+ * This internal kernel primitive supports the mutex lock data structure
+ * by placing the current task on the mutex's wait list before yielding.
+ *
+ * @waiters : The wait list to which the current task will be added
+ */
+void mutex_block_atomic(list_t *waiters);
+
 /* Application Entry Point */
 
 /* The main entry point for the user application.
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 52a16a72..18a61a69 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -64,24 +64,6 @@ static bool remove_self_from_waiters(list_t *waiters)
     return false;
 }
 
-/* Atomic block operation with enhanced error checking */
-static void mutex_block_atomic(list_t *waiters)
-{
-    if (unlikely(!waiters || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
-        panic(ERR_SEM_OPERATION);
-
-    tcb_t *self = kcb->task_current->data;
-
-    /* Add to waiters list */
-    if (unlikely(!list_pushback(waiters, self)))
-        panic(ERR_SEM_OPERATION);
-
-    /* Block and yield atomically */
-    self->state = TASK_BLOCKED;
-    _yield(); /* This releases NOSCHED when we context switch */
-}
-
 int32_t mo_mutex_init(mutex_t *m)
 {
     if (unlikely(!m))
diff --git a/kernel/task.c b/kernel/task.c
index 1304a1ee..efdf6ab8 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -1041,3 +1041,22 @@ void _sched_block(queue_t *wait_q)
     self->state = TASK_BLOCKED;
     _yield();
 }
+
+/* Atomic block operation with enhanced error checking, mainly used in mutex
+ * operations */
+void mutex_block_atomic(list_t *waiters)
+{
+    if (unlikely(!waiters || !kcb || !kcb->task_current ||
+                 !kcb->task_current->data))
+        panic(ERR_SEM_OPERATION);
+
+    tcb_t *self = kcb->task_current->data;
+
+    /* Add to waiters list */
+    if (unlikely(!list_pushback(waiters, self)))
+        panic(ERR_SEM_OPERATION);
+
+    /* Block and yield atomically */
+    self->state = TASK_BLOCKED;
+    _yield(); /* This releases NOSCHED when we context switch */
+}