Skip to content

Commit

Permalink
Update to use base url
Browse files Browse the repository at this point in the history
  • Loading branch information
snekkenull authored Jul 6, 2024
1 parent 4a3542c commit e5b5e52
Show file tree
Hide file tree
Showing 4 changed files with 61 additions and 84 deletions.
5 changes: 3 additions & 2 deletions app/webui/README.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@

## Translation Agent WebUI

A Gradio Web UI for translation agent.
This repository contains a Gradio web UI for a translation agent that utilizes various language models for translation.

### Preview

Expand Down Expand Up @@ -72,6 +72,7 @@ A Gradio Web UI for translation agent.
3. Input the source text or upload your document file.
4. Submit to get the translation; the UI will display the translated text with tokenization and highlighted differences.
5. Enable the Second Endpoint option to add another endpoint that uses a different LLM for reflection.
6. Using a custom endpoint, you can enter an OpenAI-compatible API base URL.

**Customization:**

Expand All @@ -87,4 +88,4 @@ This project is licensed under the MIT License.

**DEMO:**

[Huggingface Demo](https://huggingface.co/spaces/vilarin/Translation-Agent-WebUI)
[Huggingface Demo](https://huggingface.co/spaces/vilarin/Translation-Agent-WebUI)
24 changes: 17 additions & 7 deletions app/webui/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@

def huanik(
endpoint: str,
base: str,
model: str,
api_key: str,
choice: str,
endpoint2: str,
base2: str,
model2: str,
api_key2: str,
source_lang: str,
Expand All @@ -25,7 +27,7 @@ def huanik(
raise gr.Error("Please check that the content or options are entered correctly.")

try:
model_load(endpoint, model, api_key, temperature, rpm)
model_load(endpoint, base, model, api_key, temperature, rpm)
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")

Expand All @@ -34,6 +36,7 @@ def huanik(
if choice:
init_translation, reflect_translation, final_translation = translator_sec(
endpoint2=endpoint2,
base2=base2,
model2=model2,
api_key2=api_key2,
source_lang=source_lang,
Expand Down Expand Up @@ -68,8 +71,13 @@ def update_model(endpoint):
"OpenAI": "gpt-4o",
"TogetherAI": "Qwen/Qwen2-72B-Instruct",
"Ollama": "llama3",
"CUSTOM": "",
}
return gr.update(value=endpoint_model_map[endpoint])
if endpoint == "CUSTOM":
base = gr.update(visible=True)
else:
base = gr.update(visible=False)
return gr.update(value=endpoint_model_map[endpoint]), base

def read_doc(path):
file_type = path.split(".")[-1]
Expand Down Expand Up @@ -189,20 +197,22 @@ def closeBtnHide(output_final):
with gr.Column(scale=1) as menubar:
endpoint = gr.Dropdown(
label="Endpoint",
choices=["Groq","OpenAI","TogetherAI","Ollama"],
choices=["OpenAI","Groq","TogetherAI","Ollama","CUSTOM"],
value="OpenAI",
)
choice = gr.Checkbox(label="Additional Endpoint", info="Additional endpoint for reflection")
model = gr.Textbox(label="Model", value="gpt-4o", )
api_key = gr.Textbox(label="API_KEY", type="password", )
base = gr.Textbox(label="BASE URL", visible=False)
with gr.Column(visible=False) as AddEndpoint:
endpoint2 = gr.Dropdown(
label="Additional Endpoint",
choices=["Groq","OpenAI","TogetherAI","Ollama"],
choices=["OpenAI","Groq","TogetherAI","Ollama","CUSTOM"],
value="OpenAI",
)
model2 = gr.Textbox(label="Model", value="gpt-4o", )
api_key2 = gr.Textbox(label="API_KEY", type="password", )
base2 = gr.Textbox(label="BASE URL", visible=False)
with gr.Row():
source_lang = gr.Textbox(
label="Source Lang",
Expand Down Expand Up @@ -268,12 +278,12 @@ def closeBtnHide(output_final):
switchBtn.click(fn=switch, inputs=[source_lang,source_text,target_lang,output_final], outputs=[source_lang,source_text,target_lang,output_final])

menuBtn.click(fn=update_menu, inputs=visible, outputs=[visible, menubar], js=JS)
endpoint.change(fn=update_model, inputs=[endpoint], outputs=[model])
endpoint.change(fn=update_model, inputs=[endpoint], outputs=[model, base])

choice.select(fn=enable_sec, inputs=[choice], outputs=[AddEndpoint])
endpoint2.change(fn=update_model, inputs=[endpoint2], outputs=[model2])
endpoint2.change(fn=update_model, inputs=[endpoint2], outputs=[model2, base2])

start_ta = submit.click(fn=huanik, inputs=[endpoint, model, api_key, choice, endpoint2, model2, api_key2, source_lang, target_lang, source_text, country, max_tokens, temperature, rpm], outputs=[output_init, output_reflect, output_final, output_diff])
start_ta = submit.click(fn=huanik, inputs=[endpoint, base, model, api_key, choice, endpoint2, base2, model2, api_key2, source_lang, target_lang, source_text, country, max_tokens, temperature, rpm], outputs=[output_init, output_reflect, output_final, output_diff])
upload.upload(fn=read_doc, inputs = upload, outputs = source_text)
output_final.change(fn=export_txt, inputs=output_final, outputs=[export])

Expand Down
111 changes: 38 additions & 73 deletions app/webui/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,27 +5,26 @@
from threading import Lock
from typing import Union
import translation_agent.utils as Utils
from groq import Groq
from together import Together
from ollama import Client
import openai
import gradio as gr

RPM = 60
MODEL = ""
TEMPERATURE = 0.3
# Hide js_mode in UI now, update in plan.
JS_MODE = False
ENDPOINT = ""
client = Utils.client
client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Add your LLMs here
def model_load(
endpoint: str,
base_url: str,
model: str,
api_key: str = None,
temperature: float = TEMPERATURE,
rpm: int = RPM,
js_mode: bool = JS_MODE,

):
global client, RPM, MODEL, TEMPERATURE, JS_MODE, ENDPOINT
ENDPOINT = endpoint
Expand All @@ -35,13 +34,15 @@ def model_load(
JS_MODE = js_mode

if endpoint == "Groq":
client = Groq(api_key=api_key if api_key else os.getenv("GROQ_API_KEY"))
client = openai.OpenAI(api_key=api_key if api_key else os.getenv("GROQ_API_KEY"), base_url="https://api.groq.com/openai/v1")
elif endpoint == "TogetherAI":
client = Together(api_key=api_key if api_key else os.getenv("TOGETHER_API_KEY"))
client = openai.OpenAI(api_key=api_key if api_key else os.getenv("TOGETHER_API_KEY"), base_url="https://api.together.xyz/v1")
elif endpoint == "CUSTOM":
client = openai.OpenAI(api_key=api_key, base_url=base_url)
elif endpoint == "Ollama":
client = Client(host='http://localhost:11434')
client = openai.OpenAI(api_key="ollama", base_url="http://localhost:11434/v1")
else:
client = Utils.openai.OpenAI(api_key=api_key if api_key else os.getenv("OPENAI_API_KEY"))
client = openai.OpenAI(api_key=api_key if api_key else os.getenv("OPENAI_API_KEY"))

def rate_limit(get_max_per_minute):
def decorator(func):
Expand Down Expand Up @@ -97,71 +98,35 @@ def get_completion(
temperature = TEMPERATURE
json_mode = JS_MODE

if ENDPOINT == "Ollama":
if json_mode:
try:
response = client.chat(
model=model,
format="json",
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": prompt},
],
options = {
"temperature": TEMPERATURE,
"top_p": 1.0
},
)
print(response)
return response['message']['content']
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")
else:
try:
response = client.chat(
model=model,
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": prompt},
],
options = {
"temperature": TEMPERATURE,
"top_p": 1.0
},
)
return response['message']['content']
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")
if json_mode:
try:
response = client.chat.completions.create(
model=model,
temperature=temperature,
top_p=1,
response_format={"type": "json_object"},
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": prompt},
],
)
return response.choices[0].message.content
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")
else:
if json_mode:
try:
response = client.chat.completions.create(
model=model,
temperature=temperature,
top_p=1,
response_format={"type": "json_object"},
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": prompt},
],
)
return response.choices[0].message.content
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")
else:
try:
response = client.chat.completions.create(
model=model,
temperature=temperature,
top_p=1,
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": prompt},
],
)
return response.choices[0].message.content
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")
try:
response = client.chat.completions.create(
model=model,
temperature=temperature,
top_p=1,
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": prompt},
],
)
return response.choices[0].message.content
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")

Utils.get_completion = get_completion

Expand Down
5 changes: 3 additions & 2 deletions app/webui/process.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,7 @@ def translator(

def translator_sec(
endpoint2: str,
base2: str,
model2: str,
api_key2: str,
source_lang: str,
Expand All @@ -172,7 +173,7 @@ def translator_sec(
)

try:
model_load(endpoint2, model2, api_key2)
model_load(endpoint2, base2, model2, api_key2)
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")

Expand Down Expand Up @@ -213,7 +214,7 @@ def translator_sec(
init_translation = "".join(translation_1_chunks)

try:
model_load(endpoint2, model2, api_key2)
model_load(endpoint2, base2, model2, api_key2)
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")

Expand Down

0 comments on commit e5b5e52

Please sign in to comment.