Skip to content

Commit

Permalink
Add prints to prediction pipeline loading
Browse files Browse the repository at this point in the history
  • Loading branch information
Hermitao committed Mar 11, 2024
1 parent 73f2d78 commit 236664b
Showing 1 changed file with 5 additions and 1 deletion.
6 changes: 5 additions & 1 deletion hubconf.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,15 @@ def promptsentinel_unbalanced_paraphrase_v1(*args, **kwargs):
def prediction_pipeline(*args, **kwargs):
    """Load a PromptSentinel model from disk and wrap it in a PredictionPipeline.

    Args:
        *args: Optional; ``args[0]`` may name the model variant to load.
            Any value not in the known-variant list silently falls back to
            ``'PromptSentinel-Unbalanced-Paraphrase-v1'``.
        **kwargs: Unused; accepted for torch.hub entry-point compatibility.

    Returns:
        PredictionPipeline: wrapper exposing ``predict("{text}")`` to
        classify prompts.
    """
    valid_strings = ['PromptSentinel-Unbalanced-v1', 'PromptSentinel-Balanced-v1', 'PromptSentinel-Unbalanced-Paraphrase-v1']
    arg1 = args[0] if args and args[0] in valid_strings else 'PromptSentinel-Unbalanced-Paraphrase-v1'

    print(f"Loading model {arg1}...")
    model_path = os.path.join(os.path.dirname(__file__), f'PromptSentinel/{arg1}/model.pth')
    # Single torch.load with a computed map_location replaces the duplicated
    # call; map_location='cpu' keeps loading working on GPU-less machines.
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoints (consider weights_only=True if the torch version supports it).
    map_location = None if torch.cuda.is_available() else torch.device('cpu')
    model = torch.load(model_path, map_location=map_location)
    print(f'Model {arg1} loaded successfully.')

    print("Creating prediction pipeline...")
    pipeline = PredictionPipeline(model)
    print('Pipeline created successfully. Use PredictionPipeline.predict("{text}") to classify prompts.')

    return pipeline

Expand Down

0 comments on commit 236664b

Please sign in to comment.