# SPDX-License-Identifier: Apache-2.0
"""
Evaluate Transcription API correctness by computing Word Error Rate (WER)
on a given ASR dataset. When provided, it will also compare the WER against
a baseline.
This simulates real-world usage of the API and makes sure that the frontend and
AsyncLLMEngine are working correctly.
"""
import asyncio
import io
import time
from statistics import mean, median
from typing import List

import librosa
import pytest
import soundfile
import torch
from datasets import load_dataset
from evaluate import load
from transformers import AutoTokenizer

from ....utils import RemoteOpenAIServer


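# Encode a (waveform, sampling_rate) pair as an in-memory WAV file for upload.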
def to_bytes(y, sr):
    buffer = io.BytesIO()
    soundfile.write(buffer, y, sr, format="WAV")
    buffer.seek(0)
    return buffer


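# Submit a single transcription request and measure its latency and output
# token count.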
async def transcribe_audio(client, tokenizer, y, sr):
    # Send loaded audio directly instead of loading from disk,
    # don't account for that time though
    with to_bytes(y, sr) as f:
        start_time = time.perf_counter()
        transcription = await client.audio.transcriptions.create(
            file=f,
            model=tokenizer.name_or_path,
            language="en",
            temperature=0.0,
        )
        end_time = time.perf_counter()
    # NOTE there's no streaming in transcriptions, can't measure ttft
    latency = end_time - start_time
    num_output_tokens = len(
        tokenizer(transcription.text, add_special_tokens=False).input_ids)
    return latency, num_output_tokens, transcription.text


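# Transcribe one sample under the concurrency semaphore; returns
# (latency, num_output_tokens, normalized_prediction, normalized_reference).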
async def bound_transcribe(model_name, sem, client, audio, reference):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Use semaphore to limit concurrent requests.
    async with sem:
        result = await transcribe_audio(client, tokenizer, *audio)
    # Normalize *english* output/reference for evaluation.
    out = tokenizer.normalize(result[2])
    ref = tokenizer.normalize(reference)
    return result[:2] + (out, ref)


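# Transcribe every sample in the dataset, with at most `concurrent_request`
# requests in flight at a time.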
async def process_dataset(model, client, data, concurrent_request):
    sem = asyncio.Semaphore(concurrent_request)

    # Warmup call as the first `librosa.load` server-side is quite slow.
    audio, sr = data[0]["audio"]["array"], data[0]["audio"]["sampling_rate"]
    _ = await bound_transcribe(model, sem, client, (audio, sr), "")

    tasks: List[asyncio.Task] = []
    for sample in data:
        audio, sr = sample["audio"]["array"], sample["audio"]["sampling_rate"]
        task = asyncio.create_task(
            bound_transcribe(model, sem, client, (audio, sr), sample["text"]))
        tasks.append(task)
    return await asyncio.gather(*tasks)


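# Report per-request latency statistics and estimated request/token throughput.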
def print_performance_metrics(results, total_time):
    latencies = [res[0] for res in results]
    total_tokens = sum([res[1] for res in results])

    total = len(results)
    print(f"Total Requests: {total}")
    print(f"Successful Requests: {len(latencies)}")
    print(f"Average Latency: {mean(latencies):.4f} seconds")
    print(f"Median Latency: {median(latencies):.4f} seconds")
    perc = sorted(latencies)[int(len(latencies) * 0.95) - 1]
    print(f"95th Percentile Latency: {perc:.4f} seconds")
    # Throughput
    req_throughput = len(latencies) / total_time
    print(f"Estimated req_Throughput: {req_throughput:.2f} requests/s")
    throughput = total_tokens / total_time
    print(f"Estimated Throughput: {throughput:.2f} tok/s")


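# Annotate a sample with its clip duration in milliseconds (used for filtering).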
def add_duration(sample):
    y, sr = sample['audio']["array"], sample['audio']["sampling_rate"]
    sample['duration_ms'] = librosa.get_duration(y=y, sr=sr) * 1000
    return sample


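# Load the dataset and drop clips longer than Whisper's 30-second input window.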
def load_hf_dataset(dataset_repo: str, split='validation', **hf_kwargs):
    ## Load and filter the dataset
    dataset = load_dataset(dataset_repo, split=split, **hf_kwargs)
    if 'duration_ms' not in dataset[0]:
        # compute duration to filter
        dataset = dataset.map(add_duration)

    # Whisper max supported duration
    dataset = dataset.filter(lambda example: example['duration_ms'] < 30000)
    return dataset


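# Transcribe the (optionally truncated) dataset and return the corpus-level
# WER in percent.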
def run_evaluation(model: str,
                   client,
                   dataset,
                   max_concurrent_reqs: int,
                   n_examples: int = -1,
                   print_metrics: bool = True):
    if n_examples > 0:
        dataset = dataset.select(range(n_examples))
    start = time.perf_counter()
    results = asyncio.run(
        process_dataset(model, client, dataset, max_concurrent_reqs))
    end = time.perf_counter()
    total_time = end - start
    print(f"Total Test Time: {total_time:.4f} seconds")
    if print_metrics:
        print_performance_metrics(results, total_time)
    # Compute WER
    predictions = [res[2] for res in results]
    references = [res[3] for res in results]
    wer = load("wer")
    wer_score = 100 * wer.compute(references=references,
                                  predictions=predictions)
    print("WER:", wer_score)
    return wer_score


# Alternative models: "openai/whisper-large-v2", "openai/whisper-large-v3-turbo", ...
@pytest.mark.parametrize("model_name", ["openai/whisper-large-v3"])
# Original dataset is 20GB+ in size, hence we use a pre-filtered slice.
@pytest.mark.parametrize(
    "dataset_repo", ["D4nt3/esb-datasets-earnings22-validation-tiny-filtered"])
# NOTE: Expected WER measured with equivalent hf.transformers args:
# whisper-large-v3 + esb-datasets-earnings22-validation-tiny-filtered.
@pytest.mark.parametrize("expected_wer", [12.744980])
def test_wer_correctness(model_name,
                         dataset_repo,
                         expected_wer,
                         n_examples=-1,
                         max_concurrent_request=None):
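    """Start a vLLM OpenAI-compatible server, transcribe an ASR dataset through
    the Transcription API, and compare the resulting WER to the expected value."""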
    with RemoteOpenAIServer(model_name, ['--enforce-eager']) as remote_server:
        dataset = load_hf_dataset(dataset_repo)

        if not max_concurrent_request:
            # No max concurrency
            max_concurrent_request = n_examples if n_examples > 0\
                else len(dataset)

        client = remote_server.get_async_client()
        wer = run_evaluation(model_name, client, dataset,
                             max_concurrent_request, n_examples)
        if expected_wer:
            torch.testing.assert_close(wer, expected_wer, atol=1e-1, rtol=1e-2)