Commit: Update

snekkenull authored Jul 6, 2024
1 parent 98bb86b commit 4a3542c
Showing 8 changed files with 230 additions and 217 deletions.
Empty file removed app/__init__.py
38 changes: 18 additions & 20 deletions app/webui/README.md
@@ -1,7 +1,7 @@

## Translation Agent WebUI

This repository contains a Gradio web UI for a translation agent that utilizes various language models for translation.
A Gradio Web UI for a translation agent.

### Preview

@@ -14,35 +14,30 @@ This repository contains a Gradio web UI for a translation agent that utilizes various language models for translation.
- **Multiple API Support:** Integrates with popular language models like:
- Groq
- OpenAI
- Cohere
- Ollama
- Together AI
- Hugging Face Inference API
...
Llama Index supported, easily extendable
- **Different LLM for reflection**: You can now enable a second endpoint to use another LLM for reflection.


**Getting Started**

1. **Install Dependencies(Using Python Venv):**
1. **Install Dependencies:**

**Linux**
```bash
git clone https://github.com/andrewyng/translation-agent.git
cd translation-agent
python -m venv web_ui
source web_ui/bin/activate
pip install -r app/webui/requirements.txt

poetry install --with app
poetry shell
```
**Windows**
```bash
git clone https://github.com/andrewyng/translation-agent.git
cd translation-agent
python -m venv web_ui
.\web_ui\Scripts\activate
pip install -r app/webui/requirements.txt
poetry install --with app
poetry shell
python .\app\webui\app.py
```

@@ -52,15 +47,19 @@ Llama Index supported, easily extendable
```
OPENAI_API_KEY="sk-xxxxx" # Keep this field
GROQ_API_KEY="xxxxx"
COHERE_API_KEY="xxxxx"
TOGETHER_API_KEY="xxxxx"
HF_TOKEN="xxxxx"
```
- You can also set the API key directly in the web UI (see the sketch following this README diff).

3. **Run the Web UI:**

**Linux**
```bash
python app/webui/app.py
```
**Windows**
```bash
python -m app.webui.app
python .\app\webui\app.py
```

4. **Access the Web UI:**
@@ -70,10 +69,9 @@ Llama Index supported, easily extendable

1. Select your desired translation API from the Endpoint dropdown menu.
2. Input the source language, target language, and country (optional).
3. If using Hugging Face API, enter your `HF_TOKEN` in the `api_key` textbox, enter `MODEL_ID` or `HF_ENDPOINT_URL` in `Model` textbox.
4. Input the source text or upload your document file.
5. Submit and get translation, the UI will display the translated text with tokenization and highlight differences.
6. Enable Second Endpoint, you can add another endpoint by different LLMs for reflection.
3. Input the source text or upload your document file.
4. Submit and get the translation; the UI will display the translated text with tokenization and highlighted differences (a rough sketch of the highlighting approach appears below).
5. Enable "Second Endpoint" to add another endpoint that uses a different LLM for reflection.
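
The difference highlighting mentioned in step 4 can be built on Python's standard `difflib`. Below is a minimal, hypothetical sketch of a `diff_texts`-style helper that emits `(token, label)` pairs for a Gradio `HighlightedText` component; the repository's actual `diff_texts` in `process.py` may differ.

```python
from difflib import SequenceMatcher

def diff_texts(old: str, new: str):
    # Produce (token, label) pairs for gr.HighlightedText:
    # unchanged tokens get no label, inserted/replaced tokens keep their opcode tag.
    old_tok, new_tok = old.split(), new.split()
    pairs = []
    for tag, _i1, _i2, j1, j2 in SequenceMatcher(None, old_tok, new_tok).get_opcodes():
        label = None if tag == "equal" else tag
        pairs.extend((tok, label) for tok in new_tok[j1:j2])
    return pairs
```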

**Customization:**

@@ -89,4 +87,4 @@ This project is licensed under the MIT License.

**DEMO:**

[Huggingface Demo](https://huggingface.co/spaces/vilarin/Translation-Agent-WebUI)
[Huggingface Demo](https://huggingface.co/spaces/vilarin/Translation-Agent-WebUI)
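
As noted in the setup steps, API keys can come either from `.env` or from the web UI's `api_key` textbox. The snippet below is a minimal sketch of that precedence, assuming `python-dotenv`; the helper name `resolve_api_key` and the exact fallback logic are illustrative, not part of this repository.

```python
import os

from dotenv import load_dotenv

load_dotenv()  # pulls OPENAI_API_KEY / GROQ_API_KEY / ... from .env if present

def resolve_api_key(endpoint: str, ui_key: str = "") -> str:
    """Prefer a key typed into the web UI, else fall back to the environment."""
    env_names = {
        "OpenAI": "OPENAI_API_KEY",
        "Groq": "GROQ_API_KEY",
        "TogetherAI": "TOGETHER_API_KEY",
    }
    return ui_key or os.getenv(env_names.get(endpoint, "OPENAI_API_KEY"), "")
```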
1 change: 1 addition & 0 deletions app/webui/__init__.py
@@ -0,0 +1 @@
from .app import *
73 changes: 32 additions & 41 deletions app/webui/app.py
@@ -1,15 +1,8 @@
import sys
import os

# Add the project root to the Python path
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, project_root)

import re
import gradio as gr
from glob import glob
from app.webui.process import model_load, diff_texts, translator, translator_sec
from llama_index.core import SimpleDirectoryReader
from process import model_load, diff_texts, translator, translator_sec, extract_docx, extract_pdf, extract_text

def huanik(
endpoint: str,
@@ -24,16 +17,15 @@ def huanik(
source_text: str,
country: str,
max_tokens: int,
context_window: int,
num_output: int,
temperature: int,
rpm: int,
):

if not source_text or source_lang == target_lang:
raise gr.Error("Please check that the content or options are entered correctly.")

try:
model_load(endpoint, model, api_key, context_window, num_output, rpm)
model_load(endpoint, model, api_key, temperature, rpm)
except Exception as e:
raise gr.Error(f"An unexpected error occurred: {e}")

@@ -44,8 +36,6 @@ def huanik(
endpoint2=endpoint2,
model2=model2,
api_key2=api_key2,
context_window=context_window,
num_output=num_output,
source_lang=source_lang,
target_lang=target_lang,
source_text=source_text,
@@ -76,20 +66,24 @@ def update_model(endpoint):
endpoint_model_map = {
"Groq": "llama3-70b-8192",
"OpenAI": "gpt-4o",
"Cohere": "command-r",
"TogetherAI": "Qwen/Qwen2-72B-Instruct",
"Ollama": "llama3",
"Huggingface": "mistralai/Mistral-7B-Instruct-v0.3"
}
return gr.update(value=endpoint_model_map[endpoint])

def read_doc(file):
docs = SimpleDirectoryReader(input_files=[file]).load_data()
texts = ""
for doc in docs:
texts += doc.text
texts = re.sub(r'(?m)^\s*$\n?', '', texts)
return texts
def read_doc(path):
file_type = path.split(".")[-1]
print(file_type)
if file_type in ["pdf", "txt", "py", "docx", "json", "cpp", "md"]:
if file_type.endswith("pdf"):
content = extract_pdf(path)
elif file_type.endswith("docx"):
content = extract_docx(path)
else:
content = extract_text(path)
return re.sub(r'(?m)^\s*$\n?', '', content)
else:
raise gr.Error("Oops, unsupported files.")
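# --- Illustrative sketch, not part of this commit: the extract_pdf / extract_docx /
# --- extract_text helpers imported from process could look roughly like this,
# --- assuming pypdf and python-docx as dependencies (the implementations below are hypothetical).
from pypdf import PdfReader
from docx import Document

def extract_pdf(path):
    # Concatenate the text of every page in the PDF.
    reader = PdfReader(path)
    return "\n".join(page.extract_text() or "" for page in reader.pages)

def extract_docx(path):
    # Join the text of every paragraph in the Word document.
    doc = Document(path)
    return "\n".join(p.text for p in doc.paragraphs)

def extract_text(path):
    # Plain-text fallback for .txt, .py, .json, .cpp and .md files.
    with open(path, encoding="utf-8") as f:
        return f.read()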

def enable_sec(choice):
if choice:
@@ -195,7 +189,7 @@ def closeBtnHide(output_final):
with gr.Column(scale=1) as menubar:
endpoint = gr.Dropdown(
label="Endpoint",
choices=["Groq","OpenAI","Cohere","TogetherAI","Ollama","Huggingface"],
choices=["Groq","OpenAI","TogetherAI","Ollama"],
value="OpenAI",
)
choice = gr.Checkbox(label="Additional Endpoint", info="Additional endpoint for reflection")
@@ -204,7 +198,7 @@ def closeBtnHide(output_final):
with gr.Column(visible=False) as AddEndpoint:
endpoint2 = gr.Dropdown(
label="Additional Endpoint",
choices=["Groq","OpenAI","Cohere","TogetherAI","Ollama","Huggingface"],
choices=["Groq","OpenAI","TogetherAI","Ollama"],
value="OpenAI",
)
model2 = gr.Textbox(label="Model", value="gpt-4o", )
@@ -230,19 +224,12 @@ def closeBtnHide(output_final):
value=1000,
step=8,
)
context_window = gr.Slider(
label="Context Window",
minimum=512,
maximum=8192,
value=4096,
step=8,
)
num_output = gr.Slider(
label="Output Num",
minimum=256,
maximum=8192,
value=512,
step=8,
temperature = gr.Slider(
label="Temperature",
minimum=0,
maximum=1.0,
value=0.3,
step=0.1,
)
rpm = gr.Slider(
label="Request Per Minute",
Expand All @@ -251,6 +238,10 @@ def closeBtnHide(output_final):
value=60,
step=1,
)
# json_mode = gr.Checkbox(
# False,
# label="Json Mode",
# )
with gr.Column(scale=4):
source_text = gr.Textbox(
label="Source Text",
@@ -275,14 +266,14 @@ def closeBtnHide(output_final):
close = gr.Button(value="Stop", visible=False)

switchBtn.click(fn=switch, inputs=[source_lang,source_text,target_lang,output_final], outputs=[source_lang,source_text,target_lang,output_final])

menuBtn.click(fn=update_menu, inputs=visible, outputs=[visible, menubar], js=JS)
endpoint.change(fn=update_model, inputs=[endpoint], outputs=[model])

choice.select(fn=enable_sec, inputs=[choice], outputs=[AddEndpoint])
endpoint2.change(fn=update_model, inputs=[endpoint2], outputs=[model2])
start_ta = submit.click(fn=huanik, inputs=[endpoint, model, api_key, choice, endpoint2, model2, api_key2, source_lang, target_lang, source_text, country, max_tokens, context_window, num_output, rpm], outputs=[output_init, output_reflect, output_final, output_diff])

start_ta = submit.click(fn=huanik, inputs=[endpoint, model, api_key, choice, endpoint2, model2, api_key2, source_lang, target_lang, source_text, country, max_tokens, temperature, rpm], outputs=[output_init, output_reflect, output_final, output_diff])
upload.upload(fn=read_doc, inputs = upload, outputs = source_text)
output_final.change(fn=export_txt, inputs=output_final, outputs=[export])
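# --- Illustrative sketch, not part of this commit: one way process.model_load
# --- (called with the new temperature and rpm sliders above) could wire those
# --- settings into llama-index, shown for the OpenAI endpoint only. The other
# --- endpoints would use their own llama-index LLM classes; the rate-limiting
# --- detail is an assumption.
import os as _os
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI as _OpenAI

def _model_load_sketch(endpoint, model, api_key="", temperature=0.3, rpm=60):
    if endpoint == "OpenAI":
        Settings.llm = _OpenAI(
            model=model,
            temperature=temperature,
            api_key=api_key or _os.getenv("OPENAI_API_KEY"),
        )
    # rpm could back a simple client-side delay between requests, e.g. 60 / rpm seconds.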
