@@ -20,23 +20,23 @@ struct Args {
     model_architecture: llm::ModelArchitecture,
     model_path: PathBuf,
     #[arg(long, short = 'v')]
-    vocabulary_path: Option<PathBuf>,
+    tokenizer_path: Option<PathBuf>,
     #[arg(long, short = 'r')]
-    vocabulary_repository: Option<String>,
+    tokenizer_repository: Option<String>,
     #[arg(long, short = 'h')]
     host: String,
     #[arg(long, short = 'p')]
     port: u16,
 }
 impl Args {
-    pub fn to_vocabulary_source(&self) -> llm::VocabularySource {
-        match (&self.vocabulary_path, &self.vocabulary_repository) {
+    pub fn to_tokenizer_source(&self) -> llm::TokenizerSource {
+        match (&self.tokenizer_path, &self.tokenizer_repository) {
             (Some(_), Some(_)) => {
-                panic!("Cannot specify both --vocabulary-path and --vocabulary-repository");
+                panic!("Cannot specify both --tokenizer-path and --tokenizer-repository");
             }
-            (Some(path), None) => llm::VocabularySource::HuggingFaceTokenizerFile(path.to_owned()),
-            (None, Some(repo)) => llm::VocabularySource::HuggingFaceRemote(repo.to_owned()),
-            (None, None) => llm::VocabularySource::Model,
+            (Some(path), None) => llm::TokenizerSource::HuggingFaceTokenizerFile(path.to_owned()),
+            (None, Some(repo)) => llm::TokenizerSource::HuggingFaceRemote(repo.to_owned()),
+            (None, None) => llm::TokenizerSource::Embedded,
         }
     }
 }
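The renamed flags map one-to-one onto the renamed enum. As a small reference sketch, this is what each flag combination resolves to, using only the llm::TokenizerSource variants visible in the hunk above; the file name and repository string are placeholders, not values from this commit:

use std::path::PathBuf;

// Placeholder values; in the real program these come from the clap-parsed Args.
fn example_tokenizer_sources() -> [llm::TokenizerSource; 3] {
    [
        // --tokenizer-path tokenizer.json: load a HuggingFace tokenizer file from disk
        llm::TokenizerSource::HuggingFaceTokenizerFile(PathBuf::from("tokenizer.json")),
        // --tokenizer-repository some-org/some-model: fetch the tokenizer from a remote repository
        llm::TokenizerSource::HuggingFaceRemote("some-org/some-model".to_owned()),
        // neither flag: fall back to whatever tokenizer data the model file itself carries
        llm::TokenizerSource::Embedded,
    ]
}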
@@ -86,13 +86,13 @@ fn infer(
     rx_infer: std::sync::mpsc::Receiver<String>,
     tx_callback: tokio::sync::mpsc::Sender<llm::InferenceResponse>,
 ) -> Result<()> {
-    let vocabulary_source = args.to_vocabulary_source();
+    let vocabulary_source = args.to_tokenizer_source();
     let model_architecture = args.model_architecture;
     let model_path = &args.model_path;
     let now = std::time::Instant::now();

     let llm_model = llm::load_dynamic(
-        model_architecture,
+        Some(model_architecture),
         &model_path,
         vocabulary_source,
         Default::default(),
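The signature above pairs a blocking std::sync::mpsc receiver for incoming prompts with a tokio mpsc sender for streaming llm::InferenceResponse values back to the async side, which is why infer is meant to run on its own thread. A minimal sketch of wiring those channels up; the channel capacity, the prompt string, and the thread-spawn pattern are illustrative assumptions, and infer's leading parameters, which fall outside this hunk, are left as a comment rather than guessed:

// Assumed setup code, not part of this commit.
async fn run() {
    // Prompts flow in over a blocking channel so the inference thread can block on recv.
    let (tx_infer, rx_infer) = std::sync::mpsc::channel::<String>();
    // Responses flow out over an async-aware tokio channel with an arbitrary capacity.
    let (tx_callback, mut rx_callback) =
        tokio::sync::mpsc::channel::<llm::InferenceResponse>(32);

    std::thread::spawn(move || {
        // The call to infer(..) goes here; its leading parameters are outside this
        // hunk, so only the channel ends being moved onto the thread are shown.
        let _ = (rx_infer, tx_callback);
    });

    // The async side forwards a prompt and awaits streamed responses.
    tx_infer
        .send("Hello, world!".to_owned())
        .expect("inference thread stopped");
    while let Some(_response) = rx_callback.recv().await {
        // Each llm::InferenceResponse arrives here as tokens are generated.
    }
}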