@@ -99,6 +99,14 @@ static struct rt_spinlock _mp_scheduler_lock;
         rt_hw_local_irq_enable(level); \
     } while (0)
 
+#ifdef ARCH_USING_HW_THREAD_SELF
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag)
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag)
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
+
 static rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */
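
Note on the new accessor (an explanatory aside, not part of the diff): with ARCH_USING_HW_THREAD_SELF the pending-switch flag lives in the current thread's scheduling context rather than in struct rt_cpu, so it can be read and cleared without first resolving the per-CPU structure. The first macro argument is discarded in that configuration, which is why later call sites can still pass `pcpu` even in builds where no such local variable exists. As a reading aid, the two expansions of CRITICAL_SWITCH_FLAG(pcpu, curthr) are:

    /* ARCH_USING_HW_THREAD_SELF defined   */  RT_SCHED_CTX(curthr).critical_switch_flag
    /* ARCH_USING_HW_THREAD_SELF undefined */  (pcpu)->critical_switch_flag
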
@@ -749,15 +757,15 @@ rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
         /* leaving critical region of global context since we can't schedule */
         SCHEDULER_CONTEXT_UNLOCK(pcpu);
 
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         error = -RT_ESCHEDLOCKED;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* pick the highest runnable thread, and pass the control to it */
         to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
@@ -828,7 +836,7 @@ void rt_schedule(void)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
     {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);
 
@@ -837,7 +845,7 @@ void rt_schedule(void)
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
         /**
@@ -912,13 +920,13 @@ void rt_scheduler_do_irq_switch(void *context)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
     {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {
         /* flush critical & irq switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
         SCHEDULER_CONTEXT_LOCK(pcpu);
@@ -1056,6 +1064,9 @@ void rt_sched_post_ctx_switch(struct rt_thread *thread)
     }
     /* safe to access since irq is masked out */
     pcpu->current_thread = thread;
+#ifdef ARCH_USING_HW_THREAD_SELF
+    rt_hw_thread_set_self(thread);
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 
 #ifdef RT_DEBUGING_CRITICAL
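
For context, rt_hw_thread_self() / rt_hw_thread_set_self() are supplied by the architecture port when ARCH_USING_HW_THREAD_SELF is enabled. A minimal sketch of what such a hook might look like on an AArch64-style core, keeping the current thread pointer in a CPU-local system register (the register choice and the code below are illustrative assumptions, not the actual RT-Thread port):

    /* Hypothetical port sketch: assumes TPIDR_EL1 is free for kernel use. */
    rt_thread_t rt_hw_thread_self(void)
    {
        rt_ubase_t self;
        __asm__ volatile ("mrs %0, tpidr_el1" : "=r"(self));
        return (rt_thread_t)self;
    }

    void rt_hw_thread_set_self(rt_thread_t thread)
    {
        __asm__ volatile ("msr tpidr_el1, %0" :: "r"((rt_ubase_t)thread));
    }
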
@@ -1101,9 +1112,11 @@ RTM_EXPORT(rt_exit_critical_safe);
  */
 rt_base_t rt_enter_critical(void)
 {
-    rt_base_t level;
     rt_base_t critical_level;
     struct rt_thread *current_thread;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */
@@ -1125,6 +1138,20 @@ rt_base_t rt_enter_critical(void)
     /* enable interrupt */
     rt_hw_local_irq_enable(level);
 
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        /* scheduler unavailable */
+        return -RT_EINVAL;
+    }
+
+    /* critical for local cpu */
+    RT_SCHED_CTX(current_thread).critical_lock_nest++;
+    critical_level = RT_SCHED_CTX(current_thread).critical_lock_nest;
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
     return critical_level;
 }
 RTM_EXPORT(rt_enter_critical);
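
How the flag is consumed (an illustrative call sequence, not taken from this patch): holding the critical lock only bumps critical_lock_nest, and a scheduling request raised while it is held is parked in CRITICAL_SWITCH_FLAG() until the outermost rt_exit_critical() brings the nesting back to zero:

    rt_enter_critical();        /* critical_lock_nest: 0 -> 1, new depth is returned */
    /* ... manipulate scheduler-protected state ... */
    rt_schedule();              /* lock held: only CRITICAL_SWITCH_FLAG() is set */
    rt_exit_critical();         /* nest back to 0: flag observed, the switch happens now */

The value returned by rt_enter_critical() is the new nesting depth; rt_exit_critical_safe(), exported just before this hunk, can take it back to check that enter/exit calls stayed balanced.
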
@@ -1134,9 +1161,11 @@ RTM_EXPORT(rt_enter_critical);
  */
 void rt_exit_critical(void)
 {
-    rt_base_t level;
     struct rt_thread *current_thread;
     rt_bool_t need_resched;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */
@@ -1157,8 +1186,8 @@ void rt_exit_critical(void)
     if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
     {
         /* is there any scheduling request unfinished? */
-        need_resched = pcpu->critical_switch_flag;
-        pcpu->critical_switch_flag = 0;
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
@@ -1174,6 +1203,35 @@ void rt_exit_critical(void)
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
     }
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        return;
+    }
+
+    /* the necessary memory barrier is done on irq_(dis|en)able */
+    RT_SCHED_CTX(current_thread).critical_lock_nest--;
+
+    /* may need a rescheduling */
+    if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
+    {
+        /* is there any scheduling request unfinished? */
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
+
+        if (need_resched)
+            rt_schedule();
+    }
+    else
+    {
+        /* each exit_critical strictly corresponds to an enter_critical */
+        RT_ASSERT(RT_SCHED_CTX(current_thread).critical_lock_nest > 0);
+    }
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 RTM_EXPORT(rt_exit_critical);
 