  /*
   * Copyright (C) 2014  René Kijewski  <rene.kijewski@fu-berlin.de>
   *
   * This library is free software; you can redistribute it and/or
   * modify it under the terms of the GNU Lesser General Public
   * License as published by the Free Software Foundation; either
   * version 2.1 of the License, or (at your option) any later version.
   *
   * This library is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * Lesser General Public License for more details.
   *
   * You should have received a copy of the GNU Lesser General Public
   * License along with this library; if not, write to the Free Software
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
   */
  
  /**
   * @ingroup     x86-multithreading
   * @{
   *
   * @file
   * @brief       Multi-thread management for x86.
   *
   * @author      René Kijewski <rene.kijewski@fu-berlin.de>
   *
   * @}
   */
  
  #include "x86_interrupts.h"
  #include "x86_reboot.h"
  #include "x86_registers.h"
  #include "x86_threading.h"
  #include "cpu.h"
  #include "irq.h"
  #include "ucontext.h"
  #include "sched.h"
  #include "stdbool.h"
  #include "thread.h"
  
  #define ENABLE_DEBUG (0)
  #include "debug.h"
  
/* separate stack and context used for running the scheduler and ISRs */
static char isr_stack[SIGSTKSZ];
static ucontext_t isr_context;

/* context entered (via uc_link) when a thread's task function returns */
static char end_stack[SIGSTKSZ];
static ucontext_t end_context;

/* the kernel boots in an ISR-like context; cleared on the first context switch */
bool x86_in_isr = true;

/* PID of the thread whose state currently resides in the FPU registers */
static kernel_pid_t fpu_owner = KERNEL_PID_UNDEF;
  
  static struct x86_fxsave initial_fpu_state;
  
  int irq_is_in(void)
  {
      return x86_in_isr;
  }
  
  unsigned irq_disable(void)
  {
      unsigned long eflags = x86_pushf_cli();
      return (eflags & X86_IF) != 0;
  }
  
  unsigned irq_enable(void)
  {
      unsigned long eflags;
      __asm__ volatile ("pushf; pop %0; sti" : "=g"(eflags));
      return (eflags & X86_IF) != 0;
  }
  
  void irq_restore(unsigned state)
  {
      if (state) {
          __asm__ volatile ("sti");
      }
      else {
          __asm__ volatile ("cli");
      }
  }
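
/*
 * Typical usage of the IRQ API above (illustrative sketch, not code from
 * this file): irq_disable() and irq_enable() return the *previous* state
 * of the EFLAGS interrupt flag (X86_IF), so critical sections nest:
 *
 *     unsigned state = irq_disable();
 *     ... critical section ...
 *     irq_restore(state);
 */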
  
  static void __attribute__((noreturn)) isr_thread_yield(void)
  {
      sched_run();
      ucontext_t *ctx = (ucontext_t *) sched_active_thread->sp;
      DEBUG("isr_thread_yield(): switching to (%s, %p)\n\n", sched_active_thread->name, ctx->uc_context.ip);
  
      uint32_t cr0 = cr0_read();
      cr0 |= CR0_TS;
      cr0_write(cr0);
  
      x86_in_isr = false;
      setcontext(ctx);
  }
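
/*
 * Note on the CR0_TS write above: setting the task-switched flag makes
 * the first FPU/SSE instruction of the incoming thread raise #NM
 * (device-not-available). fpu_used_interrupt() below catches that trap
 * to save and restore FPU state lazily instead of on every switch.
 */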
  
  void thread_yield_higher(void)
  {
    if (x86_in_isr) {
        /* already on the ISR stack; isr_thread_yield() does not return */
        isr_thread_yield();
    }
  
      unsigned old_intr = irq_disable();
  
      x86_in_isr = true;
      isr_context.uc_stack.ss_sp = isr_stack;
      isr_context.uc_stack.ss_size = sizeof isr_stack;
      makecontext(&isr_context, isr_thread_yield, 0);
      swapcontext((ucontext_t *) sched_active_thread->sp, &isr_context);
  
      irq_restore(old_intr);
  }
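
/*
 * How the yield works: swapcontext() stores the running thread's context
 * behind sched_active_thread->sp (a ucontext_t placed on the thread's own
 * stack, see thread_stack_init()) and resumes on the dedicated ISR stack
 * in isr_thread_yield(), which runs the scheduler and setcontext()s into
 * whichever thread it picked.
 */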
  
  void *thread_arch_isr_stack_pointer(void)
  {
      return isr_context.uc_stack.ss_sp;
  }
  
  void *thread_arch_isr_stack_start(void)
  {
      return isr_stack;
  }
  
  void isr_cpu_switch_context_exit(void)
  {
      DEBUG("XXX: cpu_switch_context_exit(), num_tasks = %d\n", sched_num_threads);
  
      if (sched_num_threads <= 2) {
        /* the "idle" and "x86-hwtimer" threads are always present */
          DEBUG("cpu_switch_context_exit(): last task has ended. Shutting down.\n");
          x86_shutdown();
      }
  
      if ((sched_context_switch_request == 1) || (sched_active_thread == NULL)) {
          sched_run();
      }
  
      ucontext_t *ctx = (ucontext_t *)(sched_active_thread->sp);
      DEBUG("XXX: cpu_switch_context_exit(): calling setcontext(%s, %p)\n\n", sched_active_thread->name, ctx->uc_context.ip);
  
      x86_in_isr = false;
  
      setcontext(ctx);
  }
  
  void cpu_switch_context_exit(void)
  {
      irq_disable();
  
      if (!x86_in_isr) {
          x86_in_isr = true;
          isr_context.uc_stack.ss_sp = isr_stack;
          isr_context.uc_stack.ss_size = sizeof isr_stack;
          makecontext(&isr_context, isr_cpu_switch_context_exit, 0);
          setcontext(&isr_context);
      }
      else {
          isr_cpu_switch_context_exit();
      }
      __builtin_unreachable();
  }
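
/*
 * Neither branch above returns: isr_cpu_switch_context_exit() either
 * shuts the system down or setcontext()s into the next runnable thread,
 * which is what the __builtin_unreachable() hint asserts.
 */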
  
  char *thread_stack_init(thread_task_func_t task_func, void *arg, void *stack_start, int stacksize)
  {
      DEBUG("thread_stack_init()\n");
  
      unsigned int *stk = stack_start;
  
      ucontext_t *p = (ucontext_t *)(stk + ((stacksize - sizeof(ucontext_t)) / sizeof(void *)));
      stacksize -= sizeof(ucontext_t);
  
      getcontext(p);
      p->uc_stack.ss_sp = stk;
      p->uc_stack.ss_size = stacksize;
      p->uc_link = &end_context;
      p->uc_context.flags |= X86_IF;
      p->__fxsave = initial_fpu_state;
      makecontext(p, (makecontext_fun_t) task_func, 1, arg);
  
      return (char *) p;
  }
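
/*
 * Resulting stack layout (stack grows downwards; sketch only):
 *
 *     stack_start + stacksize -> +----------------+
 *                                |   ucontext_t   |  <- returned pointer,
 *                                +----------------+     stored as thread sp
 *                                |  thread stack  |
 *     stack_start             -> +----------------+
 *
 * The ucontext_t is carved off the top of the thread's stack, so the
 * saved stack pointer doubles as the location of the saved context.
 */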
  
  static void fpu_used_interrupt(uint8_t intr_num, struct x86_pushad *orig_ctx, unsigned long error_code)
  {
      static volatile struct x86_fxsave fpu_data;
  
      (void) intr_num;
      (void) orig_ctx;
      (void) error_code;
  
      __asm__ volatile ("clts"); /* clear task switch flag */
  
      if (fpu_owner == sched_active_pid) {
          return;
      }
  
      if (fpu_owner != KERNEL_PID_UNDEF) {
          ucontext_t *ctx_owner = (ucontext_t *) sched_threads[fpu_owner]->sp;
          __asm__ volatile ("fxsave (%0)" :: "r"(&fpu_data));
          ctx_owner->__fxsave = fpu_data;
      }
  
      ucontext_t *ctx_active = (ucontext_t *) sched_active_thread->sp;
      fpu_data = ctx_active->__fxsave;
      __asm__ volatile ("fxrstor (%0)" :: "r"(&fpu_data));
  
      fpu_owner = sched_active_pid;
  }
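
/*
 * Lazy FPU switching in one picture: every context switch sets CR0_TS
 * (see isr_thread_yield()). A thread that never touches the FPU pays for
 * no fxsave/fxrstor at all; the first FPU instruction traps here, "clts"
 * re-enables the FPU, the previous owner's registers are saved into its
 * ucontext_t, and the active thread's registers are loaded.
 */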
  
  static void x86_thread_exit(void)
  {
      irq_disable();
      if (fpu_owner == sched_active_pid) {
          fpu_owner = KERNEL_PID_UNDEF;
      }
      sched_task_exit();
  }
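
/*
 * x86_thread_exit() runs on end_stack: thread_stack_init() links every
 * thread context to end_context via uc_link, so a task function that
 * plainly returns ends up here and is reaped by sched_task_exit().
 */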
  
  void x86_init_threading(void)
  {
      getcontext(&end_context);
      end_context.uc_stack.ss_sp = end_stack;
      end_context.uc_stack.ss_size = sizeof end_stack;
      makecontext(&end_context, x86_thread_exit, 0);
  
      x86_interrupt_handler_set(X86_INT_NM, fpu_used_interrupt);
      __asm__ volatile ("fxsave (%0)" :: "r"(&initial_fpu_state));
  
      DEBUG("Threading initialized\n");
  }
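
/*
 * initial_fpu_state is captured once at boot and copied into every new
 * thread context by thread_stack_init(), so all threads start out with
 * the same clean FPU configuration.
 */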