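This patch removes the CORELOCK_SWAP option, which implemented corelocks with the ARM SWP/SWPB atomic swap instructions. SWP(B) is broken on the PortalPlayer targets, so CONFIG_CORELOCK is now unconditionally SW_CORELOCK: mutual exclusion provided by a software algorithm rather than a special instruction.

For reference, a two-core software corelock can be built with a Peterson-style algorithm. The sketch below shows the general idea only; it is not Rockbox's actual implementation, and the struct layout and use of CURRENT_CORE are assumptions for illustration:

    /* Peterson-style mutual exclusion for two cores (CPU and COP).
     * Illustrative sketch of a "software algorithm" corelock, not the
     * actual Rockbox code. Assumes CURRENT_CORE evaluates to 0 or 1
     * and that volatile accesses are sufficiently ordered here. */
    struct corelock
    {
        volatile unsigned char myl[2]; /* per-core "I want the lock" flags */
        volatile unsigned char turn;   /* which core must wait on a tie */
    };

    void corelock_init(struct corelock *cl)
    {
        cl->myl[0] = cl->myl[1] = 0;
        cl->turn = 0;
    }

    void corelock_lock(struct corelock *cl)
    {
        const unsigned int core = CURRENT_CORE;  /* 0 or 1 */
        const unsigned int other = core ^ 1;

        cl->myl[core] = 1;  /* announce intent to enter */
        cl->turn = other;   /* give the other core priority on a tie */
        /* spin while the other core also wants in and has priority */
        while (cl->myl[other] != 0 && cl->turn == other)
            ;
    }

    void corelock_unlock(struct corelock *cl)
    {
        cl->myl[CURRENT_CORE] = 0;  /* release: drop our intent flag */
    }
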
diff --git a/firmware/export/config.h b/firmware/export/config.h
index e64ca41..9a5aa73 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -793,7 +793,6 @@ Lyre prototype 1 */
 #define CORELOCK_NONE   0
 #define SW_CORELOCK     1 /* Mutual exclusion provided by a software algorithm
                              and not a special semaphore instruction */
-#define CORELOCK_SWAP   2 /* A swap (exchange) instruction */

 #if defined(CPU_PP)
 #define IDLE_STACK_SIZE  0x80
@@ -818,11 +817,7 @@ Lyre prototype 1 */
 #define IF_COP_VOID(...)    __VA_ARGS__
 #define IF_COP_CORE(core)   core

-#ifdef CPU_PP
 #define CONFIG_CORELOCK SW_CORELOCK /* SWP(B) is broken */
-#else
-#define CONFIG_CORELOCK CORELOCK_SWAP
-#endif

 #endif /* !defined(BOOTLOADER) && CONFIG_CPU != PP5002 */

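The thread.h changes below drop the CORELOCK_SWAP variants, leaving the function-based SW_CORELOCK interface (corelock_init/lock/try_lock/unlock). A minimal usage sketch of that interface follows; the shared counter and wrapper functions are made up for the example, and corelock_try_lock is taken to return nonzero on success, matching the removed swap-based macro:

    /* Illustrative caller of the remaining corelock interface; the
     * shared_count variable and these functions are examples only. */
    static struct corelock count_cl;
    static volatile long shared_count;

    void counter_init(void)
    {
        corelock_init(&count_cl);
    }

    void counter_add(long v)
    {
        corelock_lock(&count_cl);   /* spin until this core owns the lock */
        shared_count += v;          /* the other core is excluded here */
        corelock_unlock(&count_cl);
    }

    int counter_try_add(long v)
    {
        if (!corelock_try_lock(&count_cl)) /* nonzero means lock taken */
            return 0;                      /* contended: caller may retry */
        shared_count += v;
        corelock_unlock(&count_cl);
        return 1;
    }
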
diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 9678c04..f27557b 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -175,21 +175,6 @@ void corelock_init(struct corelock *cl);
 void corelock_lock(struct corelock *cl);
 int corelock_try_lock(struct corelock *cl);
 void corelock_unlock(struct corelock *cl);
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
-/* Use native atomic swap/exchange instruction */
-struct corelock
-{
-    volatile unsigned char locked;
-} __attribute__((packed));
-
-#define corelock_init(cl) \
-    ({ (cl)->locked = 0; })
-#define corelock_lock(cl) \
-    ({ while (test_and_set(&(cl)->locked, 1)); })
-#define corelock_try_lock(cl) \
-    ({ test_and_set(&(cl)->locked, 1) ? 0 : 1; })
-#define corelock_unlock(cl) \
-    ({ (cl)->locked = 0; })
 #else
 /* No atomic corelock op needed or just none defined */
 #define corelock_init(cl)
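
The next hunk removes the SWP/SWPB-based exchange macros. For reference, xchg8(a, v) computed the equivalent of the plain C below, except that SWP(B) performs the load and store as one atomic bus operation; this sketch only spells out the semantics and is not itself atomic:

    /* Semantics of the removed "swpb %0, %1, [%2]": return the old
     * byte at *a and store v there, as a single atomic operation.
     * This C version is NOT atomic; it is for reading only. */
    static inline unsigned int xchg8_semantics(volatile unsigned char *a,
                                               unsigned int v)
    {
        unsigned int old = *a;   /* value previously at *a */
        *a = (unsigned char)v;   /* new value stored */
        return old;              /* SWP returns the old value */
    }
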
@@ -384,35 +369,6 @@ struct core_entry
     *(a) = (v);          \
     corelock_unlock(cl); \
     o; })
-#elif CONFIG_CORELOCK == CORELOCK_SWAP
-/* atomic */
-#define test_and_set(a, v, ...) \
-    xchg8((a), (v))
-#define xchg8(a, v, ...) \
-({  uint32_t o;                \
-    asm volatile(              \
-        "swpb %0, %1, [%2]"    \
-        : "=&r"(o)             \
-        : "r"(v),              \
-          "r"((uint8_t*)(a))); \
-    o; })
-/* atomic */
-#define xchg32(a, v, ...) \
-({  uint32_t o;                 \
-    asm volatile(               \
-        "swp %0, %1, [%2]"      \
-        : "=&r"(o)              \
-        : "r"((uint32_t)(v)),   \
-          "r"((uint32_t*)(a))); \
-    o; })
-/* atomic */
-#define xchgptr(a, v, ...) \
-({  typeof (*(a)) o;        \
-    asm volatile(           \
-        "swp %0, %1, [%2]"  \
-        : "=&r"(o)          \
-        : "r"(v), "r"(a));  \
-    o; })
 #endif /* locking selection */
 #elif defined (CPU_COLDFIRE)
 /* atomic */