Commit de94875

Merge pull request #563 from boeckers/main
Remove max_tokens from openai_chat.py
2 parents: 3176ccc + 0a6f9c8

File tree

1 file changed: 0 additions, 9 deletions


src/vanna/openai/openai_chat.py

Lines changed: 0 additions & 9 deletions
@@ -11,14 +11,10 @@ def __init__(self, client=None, config=None):
 
         # default parameters - can be overrided using config
         self.temperature = 0.7
-        self.max_tokens = 500
 
         if "temperature" in config:
             self.temperature = config["temperature"]
 
-        if "max_tokens" in config:
-            self.max_tokens = config["max_tokens"]
-
         if "api_type" in config:
             raise Exception(
                 "Passing api_type is now deprecated. Please pass an OpenAI client instead."
@@ -75,7 +71,6 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             response = self.client.chat.completions.create(
                 model=model,
                 messages=prompt,
-                max_tokens=self.max_tokens,
                 stop=None,
                 temperature=self.temperature,
             )
@@ -87,7 +82,6 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             response = self.client.chat.completions.create(
                 engine=engine,
                 messages=prompt,
-                max_tokens=self.max_tokens,
                 stop=None,
                 temperature=self.temperature,
             )
@@ -98,7 +92,6 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             response = self.client.chat.completions.create(
                 engine=self.config["engine"],
                 messages=prompt,
-                max_tokens=self.max_tokens,
                 stop=None,
                 temperature=self.temperature,
             )
@@ -109,7 +102,6 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             response = self.client.chat.completions.create(
                 model=self.config["model"],
                 messages=prompt,
-                max_tokens=self.max_tokens,
                 stop=None,
                 temperature=self.temperature,
             )
@@ -123,7 +115,6 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             response = self.client.chat.completions.create(
                 model=model,
                 messages=prompt,
-                max_tokens=self.max_tokens,
                 stop=None,
                 temperature=self.temperature,
             )
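The practical effect of this commit: completions from this wrapper are no longer capped at the old hard-coded default of 500 tokens, and `max_tokens` can no longer be set via `config`; with the parameter omitted, the OpenAI API applies its own default limit. A minimal sketch of the resulting call shape, assuming the openai v1 client (the model name and prompt below are placeholders, not part of the diff):

```python
# Illustrative call shape after this commit: max_tokens is simply omitted,
# so the API's own default applies instead of the previous cap of 500.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

response = client.chat.completions.create(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Hello"}],
    stop=None,
    temperature=0.7,  # still overridable via config["temperature"]
)
print(response.choices[0].message.content)
```

Note that since the `config` hook for `max_tokens` is gone, a caller that still needs an output cap would have to enforce it outside this class.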
