diff --git a/firmware/export/thread.h b/firmware/export/thread.h
index 7fe128d..bd4f073 100644
--- a/firmware/export/thread.h
+++ b/firmware/export/thread.h
@@ -167,10 +167,10 @@ struct corelock
volatile unsigned char turn;
} __attribute__((packed));

-void corelock_init(struct corelock *cl);
-void corelock_lock(struct corelock *cl);
-int corelock_try_lock(struct corelock *cl);
-void corelock_unlock(struct corelock *cl);
+void corelock_init(struct corelock *cl) __attribute__((short_call));
+void corelock_lock(struct corelock *cl) __attribute__((naked, short_call));
+int corelock_try_lock(struct corelock *cl) __attribute__((naked, short_call));
+void corelock_unlock(struct corelock *cl) __attribute__((naked, short_call));
#elif CONFIG_CORELOCK == CORELOCK_SWAP
/* Use native atomic swap/exchange instruction */
struct corelock
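
Note: on ARM, GCC's short_call attribute makes callers reach the function with a
direct bl instruction, whose range is limited to +/-32MB of the call site. That
keeps calls between nearby code cheap, but a caller that may end up out of bl
range (for example when caller and callee land in different memory regions, such
as IRAM vs. SDRAM) has to arrange a long, register-indirect call itself. A
minimal sketch of both call forms; near_func and far_caller are hypothetical:

    /* Sketch only: GCC ARM call-range attributes. */
    void near_func(void) __attribute__((short_call)); /* callers emit a direct bl */

    static void (*near_func_ptr)(void) = near_func;   /* absolute address, taken once */

    void far_caller(void)
    {
        near_func_ptr(); /* indirect branch via a register, reaches any address */
    }
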
diff --git a/firmware/target/arm/system-pp502x.c b/firmware/target/arm/system-pp502x.c
index b1cef71..3504fc6 100644
--- a/firmware/target/arm/system-pp502x.c
+++ b/firmware/target/arm/system-pp502x.c
@@ -233,6 +233,10 @@ void scale_suspend_core(bool suspend)
}

#ifdef HAVE_ADJUSTABLE_CPU_FREQ
+#if NUM_CORES > 1
+static void (*long_corelock_lock)(struct corelock *) = corelock_lock;
+static void (*long_corelock_unlock)(struct corelock *) = corelock_unlock;
+#endif
void set_cpu_frequency(long frequency) ICODE_ATTR;
void set_cpu_frequency(long frequency)
#else
@@ -240,7 +244,7 @@ static void pp_set_cpu_frequency(long frequency)
#endif
{
#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
- corelock_lock(&cpufreq_cl);
+ long_corelock_lock(&cpufreq_cl);
#endif

switch (frequency)
@@ -351,7 +355,7 @@ static void pp_set_cpu_frequency(long frequency)
}

#if defined(HAVE_ADJUSTABLE_CPU_FREQ) && (NUM_CORES > 1)
- corelock_unlock(&cpufreq_cl);
+ long_corelock_unlock(&cpufreq_cl);
#endif
}
#endif /* !BOOTLOADER || SANSA_E200 || SANSA_C200 */
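
Note: set_cpu_frequency carries ICODE_ATTR, i.e. it executes from IRAM. With
corelock_lock/corelock_unlock now declared short_call, a direct call compiled
here would be emitted as a fixed-range bl that may not reach the callee, so the
calls are routed through file-scope function pointers instead: taking the
address makes the compiler load the absolute address and branch through a
register, which works regardless of the distance between caller and callee.
Reduced to its essentials (a sketch reusing the names from the hunks above,
with the frequency-switching body elided):

    #if NUM_CORES > 1
    /* The indirect call defeats the short_call bl-range limit. */
    static void (*long_corelock_lock)(struct corelock *) = corelock_lock;
    static void (*long_corelock_unlock)(struct corelock *) = corelock_unlock;
    #endif

    void set_cpu_frequency(long frequency) /* ICODE_ATTR: runs from IRAM */
    {
        long_corelock_lock(&cpufreq_cl);
        /* ... reprogram PLL / clock dividers for the requested frequency ... */
        long_corelock_unlock(&cpufreq_cl);
    }
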
diff --git a/firmware/thread.c b/firmware/thread.c
index f779ca3..34dfaba 100644
--- a/firmware/thread.c
+++ b/firmware/thread.c
@@ -265,7 +265,7 @@ static struct core_semaphores core_semaphores[NUM_CORES] IBSS_ATTR;
* Initialize the corelock structure.
*---------------------------------------------------------------------------
*/
-void corelock_init(struct corelock *cl)
+void __attribute__((short_call)) corelock_init(struct corelock *cl)
{
memset(cl, 0, sizeof (*cl));
}
@@ -275,8 +275,7 @@ void corelock_init(struct corelock *cl)
* Wait for the corelock to become free and acquire it when it does.
*---------------------------------------------------------------------------
*/
-void corelock_lock(struct corelock *cl) __attribute__((naked));
-void corelock_lock(struct corelock *cl)
+void __attribute__((naked, short_call)) corelock_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
@@ -302,8 +301,7 @@ void corelock_lock(struct corelock *cl)
 * Try to acquire the corelock. If free, caller gets it, otherwise return 0.
*---------------------------------------------------------------------------
*/
-int corelock_try_lock(struct corelock *cl) __attribute__((naked));
-int corelock_try_lock(struct corelock *cl)
+int __attribute__((naked, short_call)) corelock_try_lock(struct corelock *cl)
{
/* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
asm volatile (
@@ -331,8 +329,7 @@ int corelock_try_lock(struct corelock *cl)
* Release ownership of the corelock
*---------------------------------------------------------------------------
*/
-void corelock_unlock(struct corelock *cl) __attribute__((naked));
-void corelock_unlock(struct corelock *cl)
+void __attribute__((naked, short_call)) corelock_unlock(struct corelock *cl)
{
asm volatile (
"mov r1, %0 \n" /* r1 = PROCESSOR_ID */