From 9dd7ae82d3f3fa9dae31a442365e233a0b44cce3 Mon Sep 17 00:00:00 2001
From: Paul Kocialkowski <contact@paulk.fr>
Date: Sat, 23 Jul 2016 14:17:32 +0200
Subject: [PATCH 6/6] cortex-m0: Use assembly exception handlers for task
 switching

The way Cortex-M processors handle exceptions allows writing exception
routines directly in C, as the return from exception is performed by
branching to the special EXC_RETURN value that the core loads into the
link register on exception entry.
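
As a minimal sketch (not code from this patch, and example_irq_handler
is a made-up name), such a handler needs no special syntax; the
compiler's ordinary epilogue is enough:

  /*
   * Illustrative only: exception entry stacks r0-r3, r12, lr, pc and
   * xPSR and loads lr with an EXC_RETURN value (e.g. 0xfffffffd: return
   * to thread mode, use PSP), so the plain "bx lr" epilogue generated
   * by the compiler performs the exception return.
   */
  void example_irq_handler(void)
  {
  	/* ordinary C code */
  }
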

However, this is not safe when the handler performs a context switch.
In particular, a C handler may push the callee-saved registers it uses
on entry and pop them on exit, even though a context switch has
happened in between. While the processor itself restores {r0-r3} from
the exception frame when returning from an exception, the C handler
code may push, use and pop another register, such as r4.

It turns out that GCC 4.8 would generally only use r3 in svc_handler
and pendsv_handler, but newer versions tend to use r4 as well, thus
clobbering the r4 value that the context switch just restored and
leading to a fault when r4 is later used by the task code.
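
As a rough, hand-written sketch (not actual GCC output), the code a
newer compiler may generate for the C pendsv_handler looks like this,
with the elided body clearing the pending flag and re-scheduling:

  pendsv_handler:
  	push	{r4, lr}	@ r4 spilled to the handler (main) stack
  	...			@ clear ICSR, call __svc_handler;
  				@ __switchto restores the next task's r4-r11
  	pop	{r4, pc}	@ reloads the previous task's r4, overwriting
  				@ the value the context switch just restored
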

One occurrence of this behaviour shows up with GCC > 4.8 in __wait_evt,
where "me" is kept in r4 and gets clobbered once an exception triggers
pendsv_handler: the handler uses r4 internally, performs a context
switch and then pops its own saved copy of r4 on exit. Since the
processor's exception return only restores {r0-r3}, not r4, the task is
left with a clobbered r4.
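
Reduced to a hypothetical C fragment (this is not the real __wait_evt
body, and use() is a made-up placeholder), the failing pattern is:

  task_ *me = current_task;	/* typically allocated to r4 */
  __schedule(1, 0);		/* a re-scheduling exception runs here */
  use(me);			/* r4 may now hold a stale pointer, leading */
  				/* to failures like the assertion below */
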

This ends up with the following assertion failure:

  'tskid < TASK_ID_COUNT' in timer_cancel() at common/timer.c:137

For this reason, it is safer to have assembly routines for exception
handlers that do context switching.

BUG=chromium:631514
BRANCH=None
TEST=Build and run speedy EC with a recent GCC version
Change-Id: Ib068bc12ce2204aee3e0f563efcb94f15aa87013
Signed-off-by: Paul Kocialkowski <contact@paulk.fr>
---
core/cortex-m0/switch.S | 81 ++++++++++++++++++++++++++++++++++---------------
core/cortex-m0/task.c | 27 +----------------
2 files changed, 58 insertions(+), 50 deletions(-)
diff --git a/core/cortex-m0/switch.S b/core/cortex-m0/switch.S
index 95ea29e..d4b47cd 100644
--- a/core/cortex-m0/switch.S
+++ b/core/cortex-m0/switch.S
@@ -7,12 +7,52 @@
#include "config.h"
+#define CPU_SCB_ICSR 0xe000ed04
+
.text
.syntax unified
.code 16
/**
+ * Start the task scheduling. r0 is a pointer to task_stack_ready, which is
+ * set to 1 after the task stack is set up.
+ */
+.global __task_start
+.thumb_func
+__task_start:
+ ldr r2,=scratchpad @ area used as dummy thread stack for the first switch
+ movs r3, #2 @ use : priv. mode / thread stack / no floating point
+ adds r2, #17*4 @ put the pointer at the top of the stack
+ movs r1, #0 @ __Schedule parameter : re-schedule nothing
+ msr psp, r2 @ setup a thread stack up to the first context switch
+ movs r2, #1
+ isb @ ensure the write is done
+ msr control, r3
+ movs r3, r0
+ movs r0, #0 @ __Schedule parameter : de-schedule nothing
+ isb @ ensure the write is done
+ str r2, [r3] @ Task scheduling is now active
+ bl __schedule @ execute the task with the highest priority
+ /* we should never return here */
+ movs r0, #1 @ set to EC_ERROR_UNKNOWN
+ bx lr
+
+/**
+ * SVC exception handler
+ */
+.global svc_handler
+.thumb_func
+svc_handler:
+ push {lr} @ save link register
+ bl __svc_handler @ call svc handler helper
+ ldr r3,=current_task @ load the current task's address
+ ldr r1, [r3] @ load the current task
+ cmp r0, r1 @ compare with previous task returned by helper
+ beq svc_handler_return @ return if they are the same
+ /* continue to __switchto to switch to the new task */
+
+/**
* Task context switching
*
* Change the task scheduled after returning from the exception.
@@ -30,8 +70,6 @@
* r8, r9, r10, r11, r4, r5, r6, r7, r0, r1, r2, r3, r12, lr, pc, psr
* additional registers <|> exception frame
*/
-.global __switchto
-.thumb_func
__switchto:
mrs r2, psp @ get the task stack where the context has been saved
mov r3, sp
@@ -53,29 +91,24 @@ __switchto:
mov r11, r7
ldmia r2!, {r4-r7} @ restore r4-r7 for the next task context
msr psp, r2 @ set the process stack pointer to exception context
- bx lr @ return from exception
+
+svc_handler_return:
+ pop {pc} @ return from exception or return to caller
/**
- * Start the task scheduling. r0 is a pointer to task_stack_ready, which is
- * set to 1 after the task stack is set up.
+ * PendSV exception handler
*/
-.global __task_start
+.global pendsv_handler
.thumb_func
-__task_start:
- ldr r2,=scratchpad @ area used as dummy thread stack for the first switch
- movs r3, #2 @ use : priv. mode / thread stack / no floating point
- adds r2, #17*4 @ put the pointer at the top of the stack
- movs r1, #0 @ __Schedule parameter : re-schedule nothing
- msr psp, r2 @ setup a thread stack up to the first context switch
- movs r2, #1
- isb @ ensure the write is done
- msr control, r3
- movs r3, r0
- movs r0, #0 @ __Schedule parameter : de-schedule nothing
- isb @ ensure the write is done
- str r2, [r3] @ Task scheduling is now active
- bl __schedule @ execute the task with the highest priority
- /* we should never return here */
- movs r0, #1 @ set to EC_ERROR_UNKNOWN
- bx lr
-
+pendsv_handler:
+ push {lr} @ save link register
+ ldr r0, =#CPU_SCB_ICSR @ load CPU_SCB_ICSR's address
+ movs r1, #1 @ prepare left shift (1 << 27)
+ lsls r1, #27 @ shift the bit
+ str r1, [r0] @ clear pending flag
+ cpsid i @ ensure we have priority 0 during re-scheduling
+ movs r1, #0 @ desched nothing
+ movs r0, #0 @ resched nothing
+ bl svc_handler @ re-schedule the highest priority task
+ cpsie i @ leave priority 0
+ pop {pc} @ return from exception
diff --git a/core/cortex-m0/task.c b/core/cortex-m0/task.c
index e51621b..f96ccf8 100644
--- a/core/cortex-m0/task.c
+++ b/core/cortex-m0/task.c
@@ -57,7 +57,6 @@ static uint32_t task_switches; /* Number of times active task changed */
static uint32_t irq_dist[CONFIG_IRQ_COUNT]; /* Distribution of IRQ calls */
#endif
-extern void __switchto(task_ *from, task_ *to);
extern int __task_start(int *task_stack_ready);
#ifndef CONFIG_LOW_POWER_IDLE
@@ -120,7 +119,7 @@ uint8_t task_stacks[0
/* Reserve space to discard context on first context switch. */
uint32_t scratchpad[17];
-static task_ *current_task = (task_ *)scratchpad;
+task_ *current_task = (task_ *)scratchpad;
/*
* Bitmap of all tasks ready to be run.
@@ -242,18 +241,6 @@ task_ *__svc_handler(int desched, task_id_t resched)
return current;
}
-void svc_handler(int desched, task_id_t resched)
-{
- /*
- * The layout of the this routine (and the __svc_handler companion one)
- * ensures that we are getting the right tail call optimization from
- * the compiler.
- */
- task_ *prev = __svc_handler(desched, resched);
- if (current_task != prev)
- __switchto(prev, current_task);
-}
-
void __schedule(int desched, int resched)
{
register int p0 asm("r0") = desched;
@@ -262,18 +249,6 @@ void __schedule(int desched, int resched)
asm("svc 0" : : "r"(p0), "r"(p1));
}
-void pendsv_handler(void)
-{
- /* Clear pending flag */
- CPU_SCB_ICSR = (1 << 27);
-
- /* ensure we have priority 0 during re-scheduling */
- __asm__ __volatile__("cpsid i");
- /* re-schedule the highest priority task */
- svc_handler(0, 0);
- __asm__ __volatile__("cpsie i");
-}
-
#ifdef CONFIG_TASK_PROFILING
void task_start_irq_handler(void *excep_return)
{
--
2.9.0