From db062e3e356243ab14b9a159df7cc4d1a0fd6ac7 Mon Sep 17 00:00:00 2001 From: Yu Ishikawa Date: Tue, 29 Oct 2024 08:00:16 +0900 Subject: [PATCH] Support Google AI Studio Signed-off-by: Yu Ishikawa --- docs/docs/usage-guide/changing_a_model.md | 17 +++++++++++++++++ pr_agent/algo/__init__.py | 2 ++ pr_agent/algo/ai_handlers/litellm_ai_handler.py | 5 +++++ pr_agent/settings/.secrets_template.toml | 3 +++ requirements.txt | 1 + 5 files changed, 28 insertions(+) diff --git a/docs/docs/usage-guide/changing_a_model.md b/docs/docs/usage-guide/changing_a_model.md index 9d994d173..f214f7168 100644 --- a/docs/docs/usage-guide/changing_a_model.md +++ b/docs/docs/usage-guide/changing_a_model.md @@ -133,9 +133,26 @@ Your [application default credentials](https://cloud.google.com/docs/authenticat If you do want to set explicit credentials, then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file. +### Google AI Studio + +To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant models in the configuration section of the configuration file: + +```toml +[config] # in configuration.toml +model="gemini/gemini-1.5-flash" +model_turbo="gemini/gemini-1.5-flash" +fallback_models=["gemini/gemini-1.5-flash"] + +[google_ai_studio] # in .secrets.toml +gemini_api_key = "..." +``` + +If you don't want to set the API key in the .secrets.toml file, you can set the `GOOGLE_AI_STUDIO.GEMINI_API_KEY` environment variable. 
+ ### Anthropic To use Anthropic models, set the relevant models in the configuration section of the configuration file: + ``` [config] model="anthropic/claude-3-opus-20240229" diff --git a/pr_agent/algo/__init__.py b/pr_agent/algo/__init__.py index dbdf673b2..83d001a15 100644 --- a/pr_agent/algo/__init__.py +++ b/pr_agent/algo/__init__.py @@ -38,6 +38,8 @@ 'vertex_ai/gemini-1.5-pro': 1048576, 'vertex_ai/gemini-1.5-flash': 1048576, 'vertex_ai/gemma2': 8200, + 'gemini/gemini-1.5-pro': 1048576, + 'gemini/gemini-1.5-flash': 1048576, 'codechat-bison': 6144, 'codechat-bison-32k': 32000, 'anthropic.claude-instant-v1': 100000, diff --git a/pr_agent/algo/ai_handlers/litellm_ai_handler.py b/pr_agent/algo/ai_handlers/litellm_ai_handler.py index b88bc7c79..882e23708 100644 --- a/pr_agent/algo/ai_handlers/litellm_ai_handler.py +++ b/pr_agent/algo/ai_handlers/litellm_ai_handler.py @@ -83,6 +83,11 @@ def __init__(self): litellm.vertex_location = get_settings().get( "VERTEXAI.VERTEX_LOCATION", None ) + # Google AI Studio + # SEE https://docs.litellm.ai/docs/providers/gemini + if get_settings().get("GOOGLE_AI_STUDIO.GEMINI_API_KEY", None): + os.environ["GEMINI_API_KEY"] = get_settings().google_ai_studio.gemini_api_key + def prepare_logs(self, response, system, user, resp, finish_reason): response_log = response.dict().copy() response_log['system'] = system diff --git a/pr_agent/settings/.secrets_template.toml b/pr_agent/settings/.secrets_template.toml index 674a3221c..8fea7ff11 100644 --- a/pr_agent/settings/.secrets_template.toml +++ b/pr_agent/settings/.secrets_template.toml @@ -43,6 +43,9 @@ api_base = "" # the base url for your local Llama 2, Code Llama, and other model vertex_project = "" # the google cloud platform project name for your vertexai deployment vertex_location = "" # the google cloud platform location for your vertexai deployment +[google_ai_studio] +gemini_api_key = "" # the google AI Studio API key + [github] # ---- Set the following only for deployment type == 
"user" user_token = "" # A GitHub personal access token with 'repo' scope. diff --git a/requirements.txt b/requirements.txt index a1003e6c5..0adf66cad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,6 +8,7 @@ dynaconf==3.2.4 fastapi==0.111.0 GitPython==3.1.41 google-cloud-aiplatform==1.38.0 +google-generativeai==0.8.3 google-cloud-storage==2.10.0 Jinja2==3.1.2 litellm==1.50.2