Skip to content

Commit 4091bf6

Browse files
committed
bugfix: time_speech_total_per_sample=0 (avoid division by zero in RTF computation)
1 parent fa56f36 commit 4091bf6

File tree

2 files changed

+29
-24
lines changed

2 files changed

+29
-24
lines changed

examples/industrial_data_pretraining/paraformer/infer_after_finetune.sh

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,9 @@ python funasr/bin/inference.py \
44
--config-path="/Users/zhifu/funasr_github/test_local/funasr_cli_egs" \
55
--config-name="config.yaml" \
66
++init_param="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/model.pt" \
7-
+tokenizer_conf.token_list="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/tokens.txt" \
8-
+frontend_conf.cmvn_file="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/am.mvn" \
9-
+input="data/wav.scp" \
10-
+output_dir="./outputs/debug" \
11-
+device="cuda" \
7+
++tokenizer_conf.token_list="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/tokens.txt" \
8+
++frontend_conf.cmvn_file="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/am.mvn" \
9+
++input="data/wav.scp" \
10+
++output_dir="./outputs/debug" \
11+
++device="cuda" \
1212

funasr/auto/auto_model.py

Lines changed: 24 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ def inference(self, input, input_len=None, model=None, kwargs=None, key=None, **
260260
time_escape_total += time_escape
261261

262262
if pbar:
263-
pbar.update(1)
263+
# pbar.update(1)
264264
pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
265265
torch.cuda.empty_cache()
266266
return asr_result_list
@@ -285,10 +285,10 @@ def inference_with_vad(self, input, input_len=None, **cfg):
285285

286286
key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None))
287287
results_ret_list = []
288-
time_speech_total_all_samples = 0.0
288+
time_speech_total_all_samples = 1e-6
289289

290290
beg_total = time.time()
291-
pbar_total = tqdm(colour="red", total=len(res) + 1, dynamic_ncols=True)
291+
pbar_total = tqdm(colour="red", total=len(res), dynamic_ncols=True)
292292
for i in range(len(res)):
293293
key = res[i]["key"]
294294
vadsegments = res[i]["value"]
@@ -310,14 +310,14 @@ def inference_with_vad(self, input, input_len=None, **cfg):
310310
batch_size_ms_cum = 0
311311
beg_idx = 0
312312
beg_asr_total = time.time()
313-
time_speech_total_per_sample = speech_lengths/16000 + 1e-6
313+
time_speech_total_per_sample = speech_lengths/16000
314314
time_speech_total_all_samples += time_speech_total_per_sample
315315

316-
pbar_sample = tqdm(colour="blue", total=n + 1, dynamic_ncols=True)
316+
# pbar_sample = tqdm(colour="blue", total=n, dynamic_ncols=True)
317317

318318
all_segments = []
319319
for j, _ in enumerate(range(0, n)):
320-
pbar_sample.update(1)
320+
# pbar_sample.update(1)
321321
batch_size_ms_cum += (sorted_data[j][0][1] - sorted_data[j][0][0])
322322
if j < n - 1 and (
323323
batch_size_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size and (
@@ -336,19 +336,19 @@ def inference_with_vad(self, input, input_len=None, **cfg):
336336
segments = sv_chunk(vad_segments)
337337
all_segments.extend(segments)
338338
speech_b = [i[2] for i in segments]
339-
spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, **cfg)
339+
spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, disable_pbar=True, **cfg)
340340
results[_b]['spk_embedding'] = spk_res[0]['spk_embedding']
341341
beg_idx = end_idx
342342
if len(results) < 1:
343343
continue
344344
results_sorted.extend(results)
345345

346-
end_asr_total = time.time()
347-
time_escape_total_per_sample = end_asr_total - beg_asr_total
348-
pbar_sample.update(1)
349-
pbar_sample.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
350-
f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
351-
f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")
346+
# end_asr_total = time.time()
347+
# time_escape_total_per_sample = end_asr_total - beg_asr_total
348+
# pbar_sample.update(1)
349+
# pbar_sample.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
350+
# f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
351+
# f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")
352352

353353
restored_data = [0] * n
354354
for j in range(n):
@@ -386,7 +386,7 @@ def inference_with_vad(self, input, input_len=None, **cfg):
386386
# step.3 compute punc model
387387
if self.punc_model is not None:
388388
self.punc_kwargs.update(cfg)
389-
punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)
389+
punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, disable_pbar=True, **cfg)
390390
import copy; raw_text = copy.copy(result["text"])
391391
result["text"] = punc_res[0]["text"]
392392

@@ -418,13 +418,18 @@ def inference_with_vad(self, input, input_len=None, **cfg):
418418

419419
result["key"] = key
420420
results_ret_list.append(result)
421+
end_asr_total = time.time()
422+
time_escape_total_per_sample = end_asr_total - beg_asr_total
421423
pbar_total.update(1)
422-
423-
pbar_total.update(1)
424+
pbar_total.set_description(f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
425+
f"time_speech: {time_speech_total_per_sample: 0.3f}, "
426+
f"time_escape: {time_escape_total_per_sample:0.3f}")
427+
428+
424429
end_total = time.time()
425430
time_escape_total_all_samples = end_total - beg_total
426-
pbar_total.set_description(f"rtf_avg_all_samples: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
427-
f"time_speech_total_all_samples: {time_speech_total_all_samples: 0.3f}, "
428-
f"time_escape_total_all_samples: {time_escape_total_all_samples:0.3f}")
431+
print(f"rtf_avg_all: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
432+
f"time_speech_all: {time_speech_total_all_samples: 0.3f}, "
433+
f"time_escape_all: {time_escape_total_all_samples:0.3f}")
429434
return results_ret_list
430435

0 commit comments

Comments (0)