-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathload_embeddings.py
35 lines (26 loc) · 1.17 KB
/
load_embeddings.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import torch
from transformers import AutoTokenizer
from models.EmbeddingModel import EmbeddingModel
from utilities.args_parser import parse_args
from utilities.embeddings_load import load_embeddings_model
from utilities.embeddings_print import print_direct_embeddings
def main(tokenizer_name, embeddings_filename, dimensions, prompt):
    """Load a saved embedding model, embed *prompt*, and print the result.

    Args:
        tokenizer_name: Hugging Face tokenizer identifier or local path.
        embeddings_filename: File holding the pre-trained embedding weights.
        dimensions: Embedding dimensionality expected by the saved weights.
        prompt: Text to tokenize and embed.
    """
    # Load the tokenizer and register a [PAD] token (some tokenizers lack one).
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})

    # NOTE(review): tokenizer.vocab_size excludes tokens added above, so the
    # [PAD] id falls outside an embedding table of this size. Harmless for a
    # single unpadded prompt, but len(tokenizer) is the safe size — confirm
    # against how the weights in `embeddings_filename` were saved.
    model = load_embeddings_model(embeddings_filename, tokenizer.vocab_size, dimensions)

    # Tokenize the input text into a batch of token ids.
    input_ids = tokenizer(prompt, return_tensors="pt")['input_ids']

    # Forward pass without gradient tracking — this script is inference-only,
    # so there is no reason to build an autograd graph.
    with torch.no_grad():
        embeddings = model(input_ids)

    # Print the embedding vector for each input token.
    print_direct_embeddings(tokenizer, embeddings, input_ids)
if __name__ == "__main__":
# parse the arguments
args = parse_args()
# execute
main(args.tokenizer, args.embeddings_file, args.dimensions, args.prompt)