/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS compiler specific macros, functions, instructions
 * @version  V1.0.2
 * @date     09. April 2018
 ******************************************************************************/
/*
 * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Fallback for __has_builtin */
#ifndef __has_builtin
  #define __has_builtin(x) (0)
#endif

/* CMSIS compiler specific defines */
#ifndef   __ASM
  #define __ASM                                  asm
#endif
#ifndef   __INLINE
  #define __INLINE                               inline
#endif
#ifndef   __FORCEINLINE
  #define __FORCEINLINE                          __attribute__((always_inline))
#endif
#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE                        static inline
#endif
#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE                   __attribute__((always_inline)) static inline
#endif
#ifndef   __NO_RETURN
  #define __NO_RETURN                            __attribute__((__noreturn__))
#endif
#ifndef   CMSIS_DEPRECATED
  #define CMSIS_DEPRECATED                       __attribute__((deprecated))
#endif
#ifndef   __USED
  #define __USED                                 __attribute__((used))
#endif
#ifndef   __WEAK
  #define __WEAK                                 __attribute__((weak))
#endif
#ifndef   __PACKED
  #define __PACKED                               __attribute__((packed, aligned(1)))
#endif
#ifndef   __PACKED_STRUCT
  #define __PACKED_STRUCT                        struct __attribute__((packed, aligned(1)))
#endif
#ifndef   __UNALIGNED_UINT16_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  /*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
  __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_WRITE(addr, val)    (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT16_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  /*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
  __PACKED_STRUCT T_UINT16_READ { uint16_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT16_READ(addr)          (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef   __UNALIGNED_UINT32_WRITE
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  /*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
  __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_WRITE(addr, val)    (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef   __UNALIGNED_UINT32_READ
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wpacked"
  __PACKED_STRUCT T_UINT32_READ { uint32_t v; };
  #pragma GCC diagnostic pop
  #define __UNALIGNED_UINT32_READ(addr)          (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
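
/*
  Usage sketch (illustrative, not part of the original header): the
  __UNALIGNED_* macros make accesses to addresses without alignment
  guarantees explicit, e.g. fields at odd offsets in a byte-oriented
  protocol buffer. `packet` is a hypothetical buffer.

  \code
    uint8_t packet[8];
    uint32_t id = __UNALIGNED_UINT32_READ(&packet[1]);        // unaligned load
    __UNALIGNED_UINT16_WRITE(&packet[5], (uint16_t)0x1234U);  // unaligned store
  \endcode
*/
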
#ifndef   __ALIGNED
  #define __ALIGNED(x)                           __attribute__((aligned(x)))
#endif

/* ##########################  Core Instruction Access  ######################### */
/**
  \brief   No Operation
 */
#define __NOP()                             __ASM volatile ("nop")

/**
  \brief   Wait For Interrupt
 */
#define __WFI()                             __ASM volatile ("wfi")

/**
  \brief   Wait For Event
 */
#define __WFE()                             __ASM volatile ("wfe")

/**
  \brief   Send Event
 */
#define __SEV()                             __ASM volatile ("sev")

/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__STATIC_FORCEINLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__STATIC_FORCEINLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}

/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__STATIC_FORCEINLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}

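/*
  Usage sketch (illustrative, not part of the original header): a typical use
  of __DMB is to order a data write before a flag write that another observer
  polls. `buffer`, `data` and `ready` are hypothetical shared variables.

  \code
    buffer[0] = data;   // explicit memory access before the barrier
    __DMB();            // buffer write becomes observable before the flag write
    ready = 1U;         // explicit memory access after the barrier
  \endcode
*/
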
/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
#endif
}

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;
  __ASM volatile("rev16 %0, %1" : "=r" (result) : "r" (value));
  return result;
}
#endif

/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (int16_t)__builtin_bswap16(value);
#else
  int16_t result;

  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return result;
#endif
}

/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of Bits to rotate
  \return             Rotated value
 */
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U)
  {
    return op1;
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}


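/*
  Usage sketch (illustrative, not part of the original header) for the byte
  reverse and rotate intrinsics above:

  \code
    uint32_t w = __REV(0x12345678U);        // w == 0x78563412
    uint32_t h = __REV16(0x12345678U);      // h == 0x34127856
    int16_t  s = __REVSH((int16_t)0x0080);  // s == (int16_t)0x8000
    uint32_t r = __ROR(0x000000FFU, 8U);    // r == 0xFF000000
  \endcode
*/
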
/**
  \brief   Breakpoint
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)

/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if ((defined (__ARM_ARCH_7M__      ) && (__ARM_ARCH_7M__      == 1)) || \
     (defined (__ARM_ARCH_7EM__     ) && (__ARM_ARCH_7EM__     == 1)) || \
     (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
  __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return result;
}

/**
  \brief   Count leading zeros
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
#define __CLZ                               (uint8_t)__builtin_clz

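/*
  Usage sketch (illustrative, not part of the original header). Note that
  __CLZ expands to __builtin_clz, whose result is undefined for an input of
  0, so callers should guard that case themselves.

  \code
    uint32_t b = __RBIT(0x00000001U);  // b == 0x80000000
    uint8_t  n = __CLZ(0x00010000U);   // n == 15
  \endcode
*/
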
/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit value.
  \param [in]    addr  Pointer to data
  \return              value of type uint8_t at (*addr)
 */
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern
     has to be used instead.
  */
  __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result); /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]    addr  Pointer to data
  \return              value of type uint16_t at (*addr)
 */
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern
     has to be used instead.
  */
  __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result); /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]    addr  Pointer to data
  \return              value of type uint32_t at (*addr)
 */
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]  addr   Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
  return(result);
}


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__STATIC_FORCEINLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}

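/*
  Usage sketch (illustrative, not part of the original header): the exclusive
  access intrinsics are typically combined into a load/modify/store-retry
  loop to build lock-free read-modify-write operations. `atomic_increment`
  is a hypothetical helper, not a CMSIS API.

  \code
    static uint32_t atomic_increment(volatile uint32_t *counter)
    {
      uint32_t value;
      do {
        value = __LDREXW(counter) + 1U;          // load-exclusive and modify
      } while (__STREXW(value, counter) != 0U);  // retry until the store-exclusive succeeds
      return value;
    }
  \endcode
*/
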
/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT(ARG1,ARG2) \
__extension__ \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT(ARG1,ARG2) \
__extension__ \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

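/*
  Usage sketch (illustrative, not part of the original header): the second
  macro argument selects the saturation width, so __SSAT(x, 8) clamps to the
  int8_t range and __USAT(x, 8) clamps to the uint8_t range.

  \code
    int32_t  a = __SSAT(300, 8);   // a == 127
    int32_t  b = __SSAT(-300, 8);  // b == -128
    uint32_t c = __USAT(300, 8);   // c == 255
    uint32_t d = __USAT(-5, 8);    // d == 0
  \endcode
*/
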
/* ###########################  Core Function Access  ########################### */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}

/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}

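/*
  Usage sketch (illustrative, not part of the original header): a short
  critical section that restores the previous IRQ mask state instead of
  unconditionally re-enabling interrupts. CPSR bit 7 is the I-bit;
  __get_CPSR is defined further down in this file.

  \code
    uint32_t cpsr = __get_CPSR();  // remember the I-bit on entry
    __disable_irq();
    // ... short critical section ...
    if ((cpsr & 0x80U) == 0U)      // IRQs were enabled on entry
    {
      __enable_irq();
    }
  \endcode
*/
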
/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return  Floating Point Status/Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  return __builtin_arm_get_fpscr();
#else
  uint32_t result;

  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  return(result);
#endif
#else
  return(0U);
#endif
}

/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]  fpscr  Floating Point Status/Control value to set
 */
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
     (defined (__FPU_USED   ) && (__FPU_USED    == 1U))     )
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
  /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
  __builtin_arm_set_fpscr(fpscr);
#else
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
#endif
#else
  (void)fpscr;
#endif
}

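/*
  Usage sketch (illustrative, not part of the original header): clearing the
  cumulative floating-point exception flags (IOC, DZC, OFC, UFC, IXC in bits
  [4:0] and IDC in bit 7) with a read-modify-write of FPSCR.

  \code
    __set_FPSCR(__get_FPSCR() & ~0x0000009FUL);  // clear cumulative flags
  \endcode
*/
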
/** \brief  Get CPSR Register
    \return CPSR Register value
 */
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
  uint32_t result;
  __ASM volatile("MRS %0, cpsr" : "=r" (result) );
  return(result);
}

/** \brief  Set CPSR Register
    \param [in]  cpsr  CPSR value to set
 */
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
  __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
}

/** \brief  Get Mode
    \return Processor Mode
 */
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
  return (__get_CPSR() & 0x1FU);
}

/** \brief  Set Mode
    \param [in]  mode  Mode value to set
 */
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
  __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}

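/*
  Usage sketch (illustrative, not part of the original header): the value
  returned by __get_mode() can be compared against the architectural AArch32
  mode encodings, e.g. USR = 0x10, IRQ = 0x12, SVC = 0x13, SYS = 0x1F.

  \code
    if (__get_mode() == 0x10U)
    {
      // running unprivileged in User mode
    }
  \endcode
*/
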
/** \brief  Get Stack Pointer
    \return Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
  uint32_t result;
  __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
  return result;
}

/** \brief  Set Stack Pointer
    \param [in]  stack  Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
  __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
}

/** \brief  Get USR/SYS Stack Pointer
    \return USR/SYS Stack Pointer value
 */
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
  uint32_t cpsr = __get_CPSR();
  uint32_t result;
  __ASM volatile(
    "CPS     #0x1F  \n"
    "MOV     %0, sp   " : "=r"(result) : : "memory"
   );
  __set_CPSR(cpsr);
  __ISB();
  return result;
}

/** \brief  Set USR/SYS Stack Pointer
    \param [in]  topOfProcStack  USR/SYS Stack Pointer value to set
 */
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
  uint32_t cpsr = __get_CPSR();
  __ASM volatile(
    "CPS     #0x1F  \n"
    "MOV     sp, %0   " : : "r" (topOfProcStack) : "memory"
   );
  __set_CPSR(cpsr);
  __ISB();
}

/** \brief  Get FPEXC
    \return Floating Point Exception Control register value
 */
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
  uint32_t result;
  __ASM volatile("VMRS %0, fpexc" : "=r" (result) );
  return(result);
#else
  return(0);
#endif
}

/** \brief  Set FPEXC
    \param [in]  fpexc  Floating Point Exception Control value to set
 */
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
  __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}

/*
 * Include common core functions to access Coprocessor 15 registers
 */

#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm)         __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm  : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm)         __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm  : : "r" (Rt) : "memory" )

#include "cmsis_cp15.h"

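/*
  Usage sketch (illustrative, not part of the original header): reading the
  Main ID Register (MIDR), which is encoded as CP15 op1=0, CRn=c0, CRm=c0,
  op2=0.

  \code
    uint32_t midr;
    __get_CP(15, 0, midr, 0, 0, 0);  // expands to MRC p15, 0, midr, c0, c0, 0
  \endcode
*/
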
/** \brief  Enable Floating Point Unit

  Critical section, called from undef handler, so systick is disabled
 */
__STATIC_INLINE void __FPU_Enable(void)
{
  __ASM volatile(
    //Permit access to VFP/NEON registers by modifying CPACR
    "        MRC     p15,0,R1,c1,c0,2  \n"
    "        ORR     R1,R1,#0x00F00000 \n"
    "        MCR     p15,0,R1,c1,c0,2  \n"

    //Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
    "        ISB                       \n"

    //Enable VFP/NEON
    "        VMRS    R1,FPEXC          \n"
    "        ORR     R1,R1,#0x40000000 \n"
    "        VMSR    FPEXC,R1          \n"

    //Initialise VFP/NEON registers to 0
    "        MOV     R2,#0             \n"

    //Initialise the first 16 double-precision registers (D0-D15) to 0
    "        VMOV    D0, R2,R2         \n"
    "        VMOV    D1, R2,R2         \n"
    "        VMOV    D2, R2,R2         \n"
    "        VMOV    D3, R2,R2         \n"
    "        VMOV    D4, R2,R2         \n"
    "        VMOV    D5, R2,R2         \n"
    "        VMOV    D6, R2,R2         \n"
    "        VMOV    D7, R2,R2         \n"
    "        VMOV    D8, R2,R2         \n"
    "        VMOV    D9, R2,R2         \n"
    "        VMOV    D10,R2,R2         \n"
    "        VMOV    D11,R2,R2         \n"
    "        VMOV    D12,R2,R2         \n"
    "        VMOV    D13,R2,R2         \n"
    "        VMOV    D14,R2,R2         \n"
    "        VMOV    D15,R2,R2         \n"

#if (defined(__ARM_NEON) && (__ARM_NEON == 1))
    //Initialise the upper 16 double-precision registers (D16-D31) to 0
    "        VMOV    D16,R2,R2         \n"
    "        VMOV    D17,R2,R2         \n"
    "        VMOV    D18,R2,R2         \n"
    "        VMOV    D19,R2,R2         \n"
    "        VMOV    D20,R2,R2         \n"
    "        VMOV    D21,R2,R2         \n"
    "        VMOV    D22,R2,R2         \n"
    "        VMOV    D23,R2,R2         \n"
    "        VMOV    D24,R2,R2         \n"
    "        VMOV    D25,R2,R2         \n"
    "        VMOV    D26,R2,R2         \n"
    "        VMOV    D27,R2,R2         \n"
    "        VMOV    D28,R2,R2         \n"
    "        VMOV    D29,R2,R2         \n"
    "        VMOV    D30,R2,R2         \n"
    "        VMOV    D31,R2,R2         \n"
#endif

    //Initialise FPSCR to a known state
    "        VMRS    R2,FPSCR          \n"
    "        LDR     R3,=0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
    "        AND     R2,R2,R3          \n"
    "        VMSR    FPSCR,R2            "
  );
}

#pragma GCC diagnostic pop

#endif /* __CMSIS_GCC_H */