Skip to content

Commit 42ba098

Browse files
committed
Change task management to use an embedded list node design
The previous non-intrusive list node design for tcb_t required separately managed list nodes, introducing unnecessary memory operations and leaving node ownership and lifecycle ambiguous. This change embeds the list nodes directly in tcb_t (global_node for the global task list, mutex_node for mutex and condition-variable wait lists) and replaces node->data indirection with the tcb_from_global_node()/tcb_from_mutex_node() accessors, aligning task handling with the updated list operation model.
1 parent bb06633 commit 42ba098

File tree

5 files changed

+77
-77
lines changed

5 files changed

+77
-77
lines changed

arch/riscv/hal.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -653,7 +653,7 @@ void hal_switch_stack(void **old_sp, void *new_sp)
653653
*/
654654
void hal_interrupt_tick(void)
655655
{
656-
tcb_t *task = kcb->task_current->data;
656+
tcb_t *task = tcb_from_global_node(kcb->task_current);
657657
if (unlikely(!task))
658658
hal_panic();
659659

include/sys/task.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,10 @@ typedef struct tcb {
8686

8787
/* Stack Protection */
8888
uint32_t canary; /* Random stack canary for overflow detection */
89+
90+
/* Embedded nodes */
91+
list_node_t global_node; /* Global task list */
92+
list_node_t mutex_node; /* Mutex waiting list */
8993
} tcb_t;
9094

9195
/* Kernel Control Block (KCB)

kernel/main.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ int32_t main(void)
6363
* 'kcb->task_current' was set by the first call to mo_task_spawn.
6464
* This function transfers control and does not return.
6565
*/
66-
tcb_t *first_task = kcb->task_current->data;
66+
tcb_t *first_task = tcb_from_global_node(kcb->task_current);
6767
if (!first_task)
6868
panic(ERR_NO_TASKS);
6969

kernel/mutex.c

Lines changed: 18 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -45,35 +45,33 @@ static inline void cond_invalidate(cond_t *c)
4545
*/
4646
static bool remove_self_from_waiters(list_t *waiters)
4747
{
48-
if (unlikely(!waiters || !kcb || !kcb->task_current ||
49-
!kcb->task_current->data))
48+
if (unlikely(!waiters || !kcb || !kcb->task_current))
5049
return false;
5150

52-
tcb_t *self = kcb->task_current->data;
51+
tcb_t *self = tcb_from_global_node(kcb->task_current);
5352

5453
/* Search for and remove self from waiters list */
55-
list_node_t *curr = waiters->head->next;
56-
while (curr && curr != waiters->tail) {
57-
if (curr->data == self) {
58-
list_remove(waiters, curr);
54+
list_node_t *curr_mutex_node = waiters->head->next;
55+
while (curr_mutex_node && curr_mutex_node != waiters->tail) {
56+
if (tcb_from_mutex_node(curr_mutex_node) == self) {
57+
list_remove(waiters, curr_mutex_node);
5958
return true;
6059
}
61-
curr = curr->next;
60+
curr_mutex_node = curr_mutex_node->next;
6261
}
6362
return false;
6463
}
6564

6665
/* Atomic block operation with enhanced error checking */
6766
static void mutex_block_atomic(list_t *waiters)
6867
{
69-
if (unlikely(!waiters || !kcb || !kcb->task_current ||
70-
!kcb->task_current->data))
68+
if (unlikely(!waiters || !kcb || !kcb->task_current))
7169
panic(ERR_SEM_OPERATION);
7270

73-
tcb_t *self = kcb->task_current->data;
71+
tcb_t *self = tcb_from_global_node(kcb->task_current);
7472

7573
/* Add to waiters list */
76-
if (unlikely(!list_pushback(waiters, self)))
74+
if (unlikely(!list_pushback(waiters, &self->mutex_node)))
7775
panic(ERR_SEM_OPERATION);
7876

7977
/* Block and yield atomically */
@@ -218,8 +216,8 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
218216
}
219217

220218
/* Slow path: must block with timeout using delay mechanism */
221-
tcb_t *self = kcb->task_current->data;
222-
if (unlikely(!list_pushback(m->waiters, self))) {
219+
tcb_t *self = tcb_from_global_node(kcb->task_current);
220+
if (unlikely(!list_pushback(m->waiters, &self->mutex_node))) {
223221
NOSCHED_LEAVE();
224222
panic(ERR_SEM_OPERATION);
225223
}
@@ -277,7 +275,8 @@ int32_t mo_mutex_unlock(mutex_t *m)
277275
m->owner_tid = 0;
278276
} else {
279277
/* Transfer ownership to next waiter (FIFO) */
280-
tcb_t *next_owner = (tcb_t *) list_pop(m->waiters);
278+
list_node_t *next_owner_node = (list_node_t *) list_pop(m->waiters);
279+
tcb_t *next_owner = tcb_from_mutex_node(next_owner_node);
281280
if (likely(next_owner)) {
282281
/* Validate task state before waking */
283282
if (likely(next_owner->state == TASK_BLOCKED)) {
@@ -378,11 +377,11 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
378377
if (unlikely(!mo_mutex_owned_by_current(m)))
379378
return ERR_NOT_OWNER;
380379

381-
tcb_t *self = kcb->task_current->data;
380+
tcb_t *self = tcb_from_global_node(kcb->task_current);
382381

383382
/* Atomically add to wait list */
384383
NOSCHED_ENTER();
385-
if (unlikely(!list_pushback(c->waiters, self))) {
384+
if (unlikely(!list_pushback(c->waiters, &self->mutex_node))) {
386385
NOSCHED_LEAVE();
387386
panic(ERR_SEM_OPERATION);
388387
}
@@ -420,11 +419,11 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
420419
return ERR_TIMEOUT;
421420
}
422421

423-
tcb_t *self = kcb->task_current->data;
422+
tcb_t *self = tcb_from_global_node(kcb->task_current);
424423

425424
/* Atomically add to wait list with timeout */
426425
NOSCHED_ENTER();
427-
if (unlikely(!list_pushback(c->waiters, self))) {
426+
if (unlikely(!list_pushback(c->waiters, &self->mutex_node))) {
428427
NOSCHED_LEAVE();
429428
panic(ERR_SEM_OPERATION);
430429
}

0 commit comments

Comments
 (0)