@@ -105,9 +105,32 @@ static void emu_update_timer_interrupt(hart_t *hart)
 {
     emu_state_t *data = PRIV(hart);
 
-    /* Sync global timer with local timer */
+    /* Lazy timer checking: only check timer interrupts once the current time
+     * has reached the earliest scheduled deadline. This avoids expensive
+     * semu_timer_get() calls and interrupt checks.
+     *
+     * Fast path: skip if current time < next interrupt time.
+     * Slow path: check all harts' timers and recalculate the next deadline.
+     */
+    uint64_t current_time = semu_timer_get(&data->mtimer.mtime);
+    if (current_time < data->mtimer.next_interrupt_at) {
+        /* Fast path: no timer interrupt can fire yet, so skip the check.
+         * Still sync the timer, since hart->time is used by CSR reads.
+         */
+        hart->time = data->mtimer.mtime;
+        return;
+    }
+
+    /* Slow path: at least one timer might fire, so check this hart */
     hart->time = data->mtimer.mtime;
     aclint_mtimer_update_interrupts(hart, &data->mtimer);
+
+    /* Recalculate the next interrupt time after potential interrupt delivery.
+     * The kernel likely updated mtimecmp in the interrupt handler, which
+     * already triggered a recalculation, but call it again to be safe in case
+     * multiple harts share the same mtimecmp value.
+     */
+    aclint_mtimer_recalc_next_interrupt(&data->mtimer);
 }
 
 static void emu_update_swi_interrupt(hart_t *hart)
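The helper aclint_mtimer_recalc_next_interrupt() called above is not shown in this diff. For orientation, here is a minimal sketch of what it is assumed to do, reusing the field names that appear in the patch (mtimecmp, n_harts, next_interrupt_at); the struct name and the exact implementation are hypothetical. The idea is to scan every hart's mtimecmp and cache the earliest deadline so the fast path only compares mtime against a single value.

#include <stdint.h>

/* Sketch only: assumed shape of the timer state this patch relies on. */
typedef struct {
    uint64_t *mtimecmp;         /* per-hart compare values */
    uint32_t n_harts;           /* number of harts */
    uint64_t next_interrupt_at; /* cached earliest deadline */
} mtimer_sketch_t;

/* Cache the minimum mtimecmp across all harts so that
 * emu_update_timer_interrupt() can take its fast path until then. */
static void recalc_next_interrupt(mtimer_sketch_t *mtimer)
{
    uint64_t earliest = UINT64_MAX;
    for (uint32_t i = 0; i < mtimer->n_harts; i++) {
        if (mtimer->mtimecmp[i] < earliest)
            earliest = mtimer->mtimecmp[i];
    }
    mtimer->next_interrupt_at = earliest;
}

With this caching, the common case in emu_update_timer_interrupt() reduces to one semu_timer_get() read and one comparison per call.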
@@ -342,6 +365,8 @@ static inline sbi_ret_t handle_sbi_ecall_TIMER(hart_t *hart, int32_t fid)
             (((uint64_t) hart->x_regs[RV_R_A1]) << 32) |
             (uint64_t) (hart->x_regs[RV_R_A0]);
         hart->sip &= ~RV_INT_STI_BIT;
+        /* Recalculate the next interrupt time for lazy timer checking */
+        aclint_mtimer_recalc_next_interrupt(&data->mtimer);
         return (sbi_ret_t){SBI_SUCCESS, 0};
     default:
         return (sbi_ret_t){SBI_ERR_NOT_SUPPORTED, 0};
@@ -766,6 +791,11 @@ static int semu_init(emu_state_t *emu, int argc, char **argv)
     /* Set up ACLINT */
     semu_timer_init(&emu->mtimer.mtime, CLOCK_FREQ, hart_count);
     emu->mtimer.mtimecmp = calloc(vm->n_hart, sizeof(uint64_t));
+    emu->mtimer.n_harts = vm->n_hart;
+    /* calloc() zero-initializes mtimecmp, so next_interrupt_at starts at 0.
+     * It will be updated when the kernel writes mtimecmp via SBI or MMIO.
+     */
+    emu->mtimer.next_interrupt_at = 0;
     emu->mswi.msip = calloc(vm->n_hart, sizeof(uint32_t));
     emu->sswi.ssip = calloc(vm->n_hart, sizeof(uint32_t));
 #if SEMU_HAS(VIRTIOSND)
@@ -953,6 +983,34 @@ static void print_mmu_cache_stats(vm_t *vm)
 }
 #endif
 
+/* Calculate nanoseconds until the next timer interrupt.
+ * Returns 0 if an interrupt is already due; the result is capped at 100 ms.
+ */
+static uint64_t calc_ns_until_next_interrupt(emu_state_t *emu)
+{
+    uint64_t current_time = semu_timer_get(&emu->mtimer.mtime);
+    uint64_t next_int = emu->mtimer.next_interrupt_at;
+
+    /* If the interrupt is already due or very close, return immediately */
+    if (current_time >= next_int)
+        return 0;
+
+    /* Calculate ticks remaining until the interrupt */
+    uint64_t ticks_remaining = next_int - current_time;
+
+    /* Convert RISC-V timer ticks to nanoseconds:
+     * ns = ticks * (1e9 / CLOCK_FREQ)
+     */
+    uint64_t ns = (ticks_remaining * 1000000000ULL) / emu->mtimer.mtime.freq;
+
+    /* Cap at 100 ms to maintain responsiveness for UART and other events */
+    const uint64_t MAX_WAIT_NS = 100000000ULL; /* 100 ms */
+    if (ns > MAX_WAIT_NS)
+        ns = MAX_WAIT_NS;
+
+    return ns;
+}
+
 static int semu_run(emu_state_t *emu)
 {
     int ret;
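A note on the conversion in calc_ns_until_next_interrupt(): it multiplies before dividing, so ticks_remaining * 1000000000ULL can wrap once ticks_remaining exceeds UINT64_MAX / 1e9 (roughly 1.8e10 ticks). Whether that distance ever occurs depends on how far ahead the guest programs mtimecmp, and the 100 ms cap is applied only after the multiplication. A hedged alternative, not part of this patch, splits the value into whole seconds plus a remainder to avoid the intermediate overflow:

#include <stdint.h>

/* Hypothetical helper, not in the patch: convert timer ticks to nanoseconds
 * without the intermediate overflow of ticks * 1e9 for distant deadlines. */
static uint64_t ticks_to_ns(uint64_t ticks, uint64_t freq)
{
    uint64_t secs = ticks / freq; /* whole seconds until the deadline */
    uint64_t rem = ticks % freq;  /* leftover ticks, always < freq */
    return secs * 1000000000ULL + (rem * 1000000000ULL) / freq;
}

For timer frequencies in the tens-of-MHz range, rem * 1e9 stays far below UINT64_MAX, so neither term can wrap for any realistic deadline.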
@@ -974,36 +1032,20 @@ static int semu_run(emu_state_t *emu)
         return -1;
     }
 
-    /* Add 1ms periodic timer */
-    struct kevent kev_timer;
-    EV_SET(&kev_timer, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 1, NULL);
-    if (kevent(kq, &kev_timer, 1, NULL, 0, NULL) < 0) {
-        perror("kevent timer setup");
-        close(kq);
-        return -1;
-    }
-
-    /* Note: UART input is polled via u8250_check_ready(), no need to
-     * monitor with kqueue. Timer events are sufficient to wake from WFI.
+    /* Note: the timer is configured dynamically in the event loop based on
+     * next_interrupt_at. UART input is polled via u8250_check_ready().
      */
 #else
-    /* Linux: create timerfd for periodic wakeup */
+    /* Linux: create a timerfd for dynamic timer wakeups */
     int wfi_timer_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
     if (wfi_timer_fd < 0) {
         perror("timerfd_create");
         return -1;
     }
 
-    /* Configure 1ms periodic timer */
-    struct itimerspec its = {
-        .it_interval = {.tv_sec = 0, .tv_nsec = 1000000},
-        .it_value = {.tv_sec = 0, .tv_nsec = 1000000},
-    };
-    if (timerfd_settime(wfi_timer_fd, 0, &its, NULL) < 0) {
-        perror("timerfd_settime");
-        close(wfi_timer_fd);
-        return -1;
-    }
+    /* The timer is configured dynamically in the event loop based on
+     * next_interrupt_at to minimize unnecessary wakeups.
+     */
 #endif
 
     while (!emu->stopped) {
@@ -1025,30 +1067,55 @@ static int semu_run(emu_state_t *emu)
         }
         if (all_waiting) {
             /* All harts waiting for interrupt - use event-driven wait
-             * to reduce CPU usage while maintaining responsiveness
+             * to reduce CPU usage while maintaining responsiveness.
+             * Dynamically adjust the timer based on next_interrupt_at.
              */
+
+            /* Calculate how long to wait until the next timer interrupt */
+            uint64_t wait_ns = calc_ns_until_next_interrupt(emu);
+
+            /* If an interrupt is already due, skip waiting entirely */
+            if (wait_ns > 0) {
 #ifdef __APPLE__
-            /* macOS: wait for kqueue events (timer or UART) */
-            struct kevent events[2];
-            int nevents = kevent(kq, NULL, 0, events, 2, NULL);
-            /* Events are automatically handled - timer fires every 1ms,
-             * UART triggers on input. No need to explicitly consume. */
-            (void) nevents;
+                /* macOS: one-shot kqueue timer with a dynamic timeout */
+                struct kevent kev_timer;
+                /* NOTE_USECONDS: data is in microseconds (wait_ns / 1000) */
+                EV_SET(&kev_timer, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE | EV_ONESHOT,
+                       NOTE_USECONDS, wait_ns / 1000, NULL);
+
+                struct kevent events[2];
+                int nevents = kevent(kq, &kev_timer, 1, events, 2, NULL);
+                /* Events are handled automatically. Wakeup occurs on:
+                 * - timer expiration (wait_ns elapsed)
+                 * - UART input (if monitored)
+                 */
+                (void) nevents;
 #else
-            /* Linux: poll on timerfd and UART */
-            struct pollfd pfds[2];
-            pfds[0] = (struct pollfd){wfi_timer_fd, POLLIN, 0};
-            pfds[1] = (struct pollfd){emu->uart.in_fd, POLLIN, 0};
-            poll(pfds, 2, -1);
-
-            /* Consume timerfd event to prevent accumulation */
-            if (pfds[0].revents & POLLIN) {
-                uint64_t expirations;
-                ssize_t ret =
-                    read(wfi_timer_fd, &expirations, sizeof(expirations));
-                (void) ret; /* Ignore read errors - timer will retry */
-            }
+                /* Linux: arm the timerfd with a dynamic one-shot timeout */
+                struct itimerspec its = {
+                    .it_interval = {0, 0}, /* One-shot, no repeat */
+                    .it_value = {wait_ns / 1000000000, wait_ns % 1000000000},
+                };
+                if (timerfd_settime(wfi_timer_fd, 0, &its, NULL) < 0) {
+                    perror("timerfd_settime");
+                    /* Continue anyway - retry on the next iteration */
+                }
+
+                /* Poll on the timerfd and the UART */
+                struct pollfd pfds[2];
+                pfds[0] = (struct pollfd){wfi_timer_fd, POLLIN, 0};
+                pfds[1] = (struct pollfd){emu->uart.in_fd, POLLIN, 0};
+                poll(pfds, 2, -1);
+
+                /* Consume the timerfd event to prevent accumulation */
+                if (pfds[0].revents & POLLIN) {
+                    uint64_t expirations;
+                    ssize_t ret =
+                        read(wfi_timer_fd, &expirations, sizeof(expirations));
+                    (void) ret; /* Ignore read errors - timer will retry */
+                }
 #endif
+            }
         }
     }
 
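To illustrate the wait mechanism in isolation, the sketch below reproduces the Linux side of the loop above as a standalone program: arm a one-shot timerfd for a computed duration, block in poll(), then drain the expiration count. The 5 ms duration is a placeholder for calc_ns_until_next_interrupt(), and no UART descriptor is polled; everything else uses the same timerfd_settime()/poll()/read() calls as the patch.

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

/* Standalone sketch (Linux only) of the wait pattern used above: arm a
 * one-shot timerfd, sleep in poll(), then consume the expiration count. */
int main(void)
{
    int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
    if (tfd < 0) {
        perror("timerfd_create");
        return 1;
    }

    uint64_t wait_ns = 5000000ULL; /* placeholder: 5 ms */
    struct itimerspec its = {
        .it_interval = {0, 0}, /* one-shot, no repeat */
        .it_value = {wait_ns / 1000000000, wait_ns % 1000000000},
    };
    if (timerfd_settime(tfd, 0, &its, NULL) < 0)
        perror("timerfd_settime");

    struct pollfd pfd = {tfd, POLLIN, 0};
    poll(&pfd, 1, -1); /* blocks until the timer fires */

    if (pfd.revents & POLLIN) {
        uint64_t expirations;
        ssize_t r = read(tfd, &expirations, sizeof(expirations));
        (void) r;
    }

    close(tfd);
    return 0;
}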