Commit d995940

[serve.llm] Remove upstreamed workarounds 1/3 (#54512)
Signed-off-by: Seiji Eicher <[email protected]>
1 parent 0bda55a commit d995940

File tree

1 file changed: +0 additions, -5 deletions


python/ray/llm/_internal/serve/deployments/llm/vllm/vllm_engine.py

Lines changed: 0 additions & 5 deletions
@@ -4,7 +4,6 @@
 
 from starlette.datastructures import State
 from starlette.requests import Request
-from transformers.dynamic_module_utils import init_hf_modules
 from vllm.engine.arg_utils import AsyncEngineArgs
 from vllm.entrypoints.openai.cli_args import FrontendArgs
 from vllm.entrypoints.openai.protocol import ErrorResponse as VLLMErrorResponse
@@ -110,10 +109,6 @@ def __init__(
         """
         super().__init__(llm_config)
 
-        # Ensure transformers_modules is initialized early in worker processes.
-        # This is critical for models with trust_remote_code=True to avoid pickle errors.
-        init_hf_modules()
-
         self.llm_config = llm_config
 
         if vllm is None:
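
For context on what was removed: init_hf_modules() from transformers.dynamic_module_utils creates the Hugging Face dynamic-modules cache (with an __init__.py) and adds it to sys.path, so that classes generated for trust_remote_code=True models resolve when pickled objects referencing them are deserialized in a fresh worker process. Per the commit title, this behavior has been upstreamed, so the explicit call is no longer needed. Below is a minimal sketch of what the old workaround did, assuming only that transformers is installed; the worker_init wrapper name is hypothetical, not part of Ray's API.

# Minimal sketch of the removed workaround. `worker_init` is a hypothetical
# wrapper for illustration, not part of Ray's API; requires `transformers`.
from transformers.dynamic_module_utils import init_hf_modules

def worker_init() -> None:
    # Create the Hugging Face dynamic-modules cache (and its __init__.py)
    # if missing, and add it to sys.path. Without this, unpickling an object
    # whose class was generated for a trust_remote_code=True model can raise
    # ModuleNotFoundError: No module named 'transformers_modules'.
    init_hf_modules()

worker_init()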
