mutex.c
3.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
/*
* Copyright (C) 2015 Kaspar Schleiser <kaspar@schleiser.de>
* 2013 Freie Universität Berlin
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup core_sync
* @{
*
* @file
* @brief Kernel mutex implementation
*
* @author Kaspar Schleiser <kaspar@schleiser.de>
* @author Joakim Nohlgård <joakim.nohlgard@eistec.se>
*
* @}
*/
#include <stdio.h>
#include <inttypes.h>
#include "mutex.h"
#include "thread.h"
#include "atomic.h"
#include "sched.h"
#include "thread.h"
#include "irq.h"
#include "thread.h"
#include "list.h"
#define ENABLE_DEBUG (0)
#include "debug.h"
/**
 * @brief   Lock a mutex, optionally blocking until it becomes available.
 *
 * The queue head pointer doubles as the lock state: NULL means unlocked,
 * MUTEX_LOCKED means locked with no waiters, anything else is the list of
 * blocked threads.
 *
 * @param[in,out]   mutex       the mutex to lock (must not be NULL)
 * @param[in]       blocking    non-zero: block until the mutex is acquired;
 *                              zero: fail immediately if it is contested
 *
 * @return  1 if the mutex was acquired
 * @return  0 if @p blocking was zero and the mutex was already locked
 */
int _mutex_lock(mutex_t *mutex, int blocking)
{
    unsigned irqstate = irq_disable();

    if (mutex->queue.next == NULL) {
        /* fast path: mutex is unlocked, take it without queueing */
        mutex->queue.next = MUTEX_LOCKED;
        DEBUG("PID[%" PRIkernel_pid "]: mutex_wait early out.\n",
              sched_active_pid);
        irq_restore(irqstate);
        return 1;
    }
    else if (blocking) {
        /* fix: this trace previously fired unconditionally, even on the
         * uncontended fast path where the mutex is not actually in use */
        DEBUG("PID[%" PRIkernel_pid "]: Mutex in use.\n", sched_active_pid);
        thread_t *me = (thread_t*)sched_active_thread;
        DEBUG("PID[%" PRIkernel_pid "]: Adding node to mutex queue: prio: %"
              PRIu32 "\n", sched_active_pid, (uint32_t)me->priority);
        sched_set_status(me, STATUS_MUTEX_BLOCKED);
        if (mutex->queue.next == MUTEX_LOCKED) {
            /* first waiter: turn the sentinel into a one-element list */
            mutex->queue.next = (list_node_t*)&me->rq_entry;
            mutex->queue.next->next = NULL;
        }
        else {
            /* further waiters are inserted by thread priority */
            thread_add_to_list(&mutex->queue, me);
        }
        irq_restore(irqstate);
        thread_yield_higher();
        /* We were woken up by scheduler. Waker removed us from queue.
         * We have the mutex now. */
        return 1;
    }
    else {
        /* non-blocking attempt on a locked mutex: report failure */
        irq_restore(irqstate);
        return 0;
    }
}
/**
 * @brief   Release a mutex and wake the highest-priority waiter, if any.
 *
 * Safe to call on an unlocked mutex (no-op). May trigger a context switch
 * when the woken thread has a higher priority than the caller.
 *
 * @param[in,out]   mutex   the mutex to unlock (must not be NULL)
 */
void mutex_unlock(mutex_t *mutex)
{
    unsigned state = irq_disable();

    DEBUG("mutex_unlock(): queue.next: 0x%08x pid: %" PRIkernel_pid "\n",
          (unsigned)mutex->queue.next, sched_active_pid);

    list_node_t *head = mutex->queue.next;

    if (head == NULL) {
        /* unlocking an unlocked mutex is a no-op */
        irq_restore(state);
        return;
    }

    if (head == MUTEX_LOCKED) {
        /* locked, but nobody waiting: simply mark it free */
        mutex->queue.next = NULL;
        irq_restore(state);
        return;
    }

    /* hand the mutex over to the first (highest-priority) waiter */
    list_node_t *node = list_remove_head(&mutex->queue);
    thread_t *waiter = container_of((clist_node_t*)node, thread_t, rq_entry);

    DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n",
          waiter->pid);
    sched_set_status(waiter, STATUS_PENDING);

    if (mutex->queue.next == NULL) {
        /* that was the last waiter: locked with an empty queue */
        mutex->queue.next = MUTEX_LOCKED;
    }

    uint16_t waiter_prio = waiter->priority;
    irq_restore(state);
    sched_switch(waiter_prio);
}
/**
 * @brief   Atomically release @p mutex and put the calling thread to sleep.
 *
 * Equivalent to mutex_unlock() followed by thread_sleep(), but performed
 * under one IRQ-disabled section so no wakeup can slip in between.
 *
 * @param[in,out]   mutex   the mutex to unlock (must not be NULL)
 */
void mutex_unlock_and_sleep(mutex_t *mutex)
{
    DEBUG("PID[%" PRIkernel_pid "]: unlocking mutex. queue.next: 0x%08x, and "
          "taking a nap\n", sched_active_pid, (unsigned)mutex->queue.next);

    unsigned state = irq_disable();
    list_node_t *head = mutex->queue.next;

    if (head == MUTEX_LOCKED) {
        /* no waiters: just mark the mutex as free */
        mutex->queue.next = NULL;
    }
    else if (head != NULL) {
        /* pass ownership to the highest-priority waiter */
        list_node_t *node = list_remove_head(&mutex->queue);
        thread_t *waiter = container_of((clist_node_t*)node, thread_t,
                                        rq_entry);
        DEBUG("PID[%" PRIkernel_pid "]: waking up waiter.\n", waiter->pid);
        sched_set_status(waiter, STATUS_PENDING);
        if (mutex->queue.next == NULL) {
            /* last waiter removed: locked with an empty queue */
            mutex->queue.next = MUTEX_LOCKED;
        }
    }
    /* head == NULL: mutex was not locked; nothing to release */

    DEBUG("PID[%" PRIkernel_pid "]: going to sleep.\n", sched_active_pid);
    sched_set_status((thread_t*)sched_active_thread, STATUS_SLEEPING);
    irq_restore(state);
    thread_yield_higher();
}