@@ -267,8 +267,10 @@ async def sync_crawls(self, data: MCSyncData):
                 params,
             )

+            force_update = crawl.paused_at and status.stopReason != "paused"
+
             await self.increment_pod_exec_time(
-                pods, crawl, status, EXEC_TIME_UPDATE_SECS
+                pods, crawl, status, EXEC_TIME_UPDATE_SECS if not force_update else 0
             )

         else:
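The new force_update flag passes an interval of 0 instead of EXEC_TIME_UPDATE_SECS when the crawl has been paused but the stop reason is not yet "paused", which suggests an immediate exec-time update rather than a throttled one. A hedged sketch of that throttling pattern follows; the class, the 60-second value, and everything else below are hypothetical illustrations, not the operator's actual increment_pod_exec_time.

import time

EXEC_TIME_UPDATE_SECS = 60  # assumed throttle interval, for illustration only

class ExecTimeThrottle:
    """Hypothetical sketch: decide whether to flush pod exec time now."""

    def __init__(self) -> None:
        self.last_update = 0.0

    def should_update(self, update_secs: int) -> bool:
        # update_secs == 0 forces an immediate update (the force_update case);
        # otherwise skip until update_secs seconds since the last flush.
        now = time.monotonic()
        if update_secs and now - self.last_update < update_secs:
            return False
        self.last_update = now
        return True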
@@ -877,15 +879,12 @@ async def sync_crawl_state(
         if status.anyCrawlPodNewExit:
             await self.log_crashes(crawl.id, status.podStatus, redis)

-        if crawl.paused_at and redis:
-            for name in pods.keys():
-                pod_status = status.podStatus[name]
-                if (
-                    pod_status.isNewExit
-                    and pod_status.exitTime
-                    and pod_status.reason == "done"
-                ):
-                    await redis.hset(f"{crawl.id}:status", name, "interrupted")
+        if crawl.paused_at and redis:
+            for name in pods.keys():
+                pod_status = status.podStatus[name]
+                if pod_status.exitTime and pod_status.reason == "done":
+                    await redis.hset(f"{crawl.id}:status", name, "interrupted")
+                    pod_status.reason = "paused"

         # remove stopping key (used for pause) unless actually stopping if:
         # 1. no more crawler pods running (to allow easily to resume)
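The reworked pause handling above no longer checks isNewExit: any crawler pod that exited with reason "done" while the crawl is paused is recorded as "interrupted" in Redis (allowing the crawl to be resumed later), and its in-memory reason is flipped to "paused", presumably so the exit is not treated as a normal completion. Below is a minimal, self-contained sketch of that flow; the PodStatus dataclass, FakeRedis stand-in, and function names are assumptions for illustration, not the operator's real types.

import asyncio
from dataclasses import dataclass


@dataclass
class PodStatus:
    # Simplified stand-in for the operator's per-pod status object.
    exitTime: str = ""
    reason: str = ""


class FakeRedis:
    # Minimal async stand-in for the Redis client used by the operator.
    def __init__(self) -> None:
        self.hashes: dict[str, dict[str, str]] = {}

    async def hset(self, key: str, name: str, value: str) -> None:
        self.hashes.setdefault(key, {})[name] = value


async def mark_paused_pods(crawl_id, paused_at, pods, pod_statuses, redis) -> None:
    # Mirrors the diff: pods that finished ("done") while the crawl is paused
    # are marked "interrupted" in Redis and their reason is set to "paused".
    if not paused_at:
        return
    for name in pods:
        pod_status = pod_statuses[name]
        if pod_status.exitTime and pod_status.reason == "done":
            await redis.hset(f"{crawl_id}:status", name, "interrupted")
            pod_status.reason = "paused"


async def main() -> None:
    redis = FakeRedis()
    statuses = {"crawl-0": PodStatus(exitTime="2024-01-01T00:00:00Z", reason="done")}
    await mark_paused_pods("crawl123", True, ["crawl-0"], statuses, redis)
    print(redis.hashes)                # {'crawl123:status': {'crawl-0': 'interrupted'}}
    print(statuses["crawl-0"].reason)  # paused


asyncio.run(main())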