cpu.h
/*
 * SPDX-FileCopyrightText: 2014 Freie Universität Berlin (FUB) & INRIA
 * SPDX-License-Identifier: LGPL-2.1-only
 */

#pragma once

#include <stdint.h>

#include <msp430.h>

#include "sched.h"
#include "thread.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief   Word size in bits of the MSP430 platform
 */
#define WORDSIZE 16

/**
 * @brief   This CPU provides its own pm_set_lowest() implementation
 */
#define PROVIDES_PM_SET_LOWEST

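/*
 * Illustrative sketch only, not the actual implementation: with
 * PROVIDES_PM_SET_LOWEST defined, the CPU code is expected to supply its
 * own pm_set_lowest(). On MSP430 this amounts to setting low-power bits
 * in the status register, e.g. for LPM3:
 *
 *     void pm_set_lowest(void)
 *     {
 *         // enter LPM3 with interrupts enabled; __enter_isr() below
 *         // clears these bits on the stacked SR, so the CPU stays
 *         // awake once an interrupt wakes it up
 *         __bis_SR_register(CPUOFF | SCG0 | SCG1 | GIE);
 *     }
 */
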
/**
 * @brief   Macro for defining interrupt service routines
 */
#define ISR(a,b) void __attribute__((naked, interrupt (a))) b(void)

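/*
 * Usage sketch, not part of the original header: a typical ISR built on
 * the macro above. The vector name TIMER0_A0_VECTOR is an assumption
 * taken from <msp430.h> and depends on the concrete MCU. Because ISR()
 * produces a naked function, the body must bracket its work with
 * __enter_isr()/__exit_isr() (defined below):
 *
 *     ISR(TIMER0_A0_VECTOR, isr_timer0_a0)
 *     {
 *         __enter_isr();  // save thread context, switch to the ISR stack
 *
 *         // ... handle the interrupt; drivers may set
 *         // sched_context_switch_request here ...
 *
 *         __exit_isr();   // reschedule if requested, restore a context
 *     }
 */
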
/**
 * @brief   The current ISR state (inside or not)
 */
extern volatile int __irq_is_in;

/**
 * @brief   Save the current thread context from inside an ISR
 */
static inline void __attribute__((always_inline)) __save_context(void)
{
    /* push the general purpose registers onto the thread's stack */
    __asm__("push r15");
    __asm__("push r14");
    __asm__("push r13");
    __asm__("push r12");
    __asm__("push r11");
    __asm__("push r10");
    __asm__("push r9");
    __asm__("push r8");
    __asm__("push r7");
    __asm__("push r6");
    __asm__("push r5");
    __asm__("push r4");

    /* store the resulting stack pointer in the active thread's TCB */
    __asm__("mov.w r1,%0" : "=m"(thread_get_active()->sp));
}

/**
 * @brief   Restore the thread context from inside an ISR
 */
static inline void __attribute__((always_inline)) __restore_context(void)
{
    /* load the stack pointer of the thread scheduled to run next */
    __asm__("mov.w %0,r1" : : "m"(thread_get_active()->sp));

    /* pop the general purpose registers from that thread's stack */
    __asm__("pop r4");
    __asm__("pop r5");
    __asm__("pop r6");
    __asm__("pop r7");
    __asm__("pop r8");
    __asm__("pop r9");
    __asm__("pop r10");
    __asm__("pop r11");
    __asm__("pop r12");
    __asm__("pop r13");
    __asm__("pop r14");
    __asm__("pop r15");
    /* return from interrupt: pops SR and PC, resuming the thread */
    __asm__("reti");
}

/**
 * @brief   Run this code on entering interrupt routines
 */
static inline void __attribute__((always_inline)) __enter_isr(void)
{
    /* modify the status register pushed onto the stack so that the CPU
     * does not drop right back into power-saving mode on return */
    __asm__ volatile(
        "bic %[mask], 0(SP)" "\n\t"
        : /* no outputs */
        : [mask] "i"(CPUOFF | SCG0 | SCG1 | OSCOFF)
        : "memory"
    );
    extern char __stack; /* defined by linker script to end of RAM */
    __save_context();
    /* switch to the dedicated ISR stack at the end of RAM */
    __asm__("mov.w %0,r1" : : "i"(&__stack));
    __irq_is_in = 1;
}

/**
 * @brief   Run this code on exiting interrupt routines
 */
static inline void __attribute__((always_inline)) __exit_isr(void)
{
    __irq_is_in = 0;

    /* run the scheduler if a context switch was requested by the ISR */
    if (sched_context_switch_request) {
        sched_run();
    }

    __restore_context();
}

/**
 * @brief   Returns the last instruction's address
 */
__attribute__((always_inline))
static inline uintptr_t cpu_get_caller_pc(void)
{
    return (uintptr_t)__builtin_return_address(0);
}

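/*
 * Usage sketch with a hypothetical helper, not part of this header:
 * cpu_get_caller_pc() can be used in error paths to record the address
 * the current function will return to, e.g.:
 *
 *     #include <inttypes.h>
 *     #include <stdio.h>
 *
 *     void log_call_site(void)
 *     {
 *         // prints the caller's return address
 *         printf("called from 0x%" PRIxPTR "\n", cpu_get_caller_pc());
 *     }
 */
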
#ifdef __cplusplus
}
#endif