# config.local.toml — local API agents (gitignored)
#
# Copy this file to config.local.toml and uncomment/edit the sections below.
# These agents connect to OpenAI-compatible endpoints (Ollama, llama-server,
# LM Studio, vLLM, etc.) and participate in the chat room via @mentions.
#
# Usage:
# 1. cp config.local.toml.example config.local.toml
# 2. Edit the [agents.NAME] section below
# 3. Start the server: python run.py
# 4. Start the wrapper: python wrapper_api.py NAME
# (or use: windows/start_api_agent.bat NAME)

# --- Example: Qwen via llama-server ---
# [agents.qwen]
# type = "api" # required — marks this as an API agent
# base_url = "http://localhost:8189/v1" # OpenAI-compatible base URL
# model = "qwen3-4b" # model name (sent in API request)
# color = "#8b5cf6" # status pill color in chat UI
# label = "Qwen" # display name
# # api_key_env = "OPENAI_API_KEY" # optional: env var containing API key
# # system_prompt = "You are a helpful AI." # optional: custom system prompt
# # context_messages = 20 # optional: how many recent messages to send as context

# --- Example: Ollama ---
# [agents.llama]
# type = "api"
# base_url = "http://localhost:11434/v1"
# model = "llama3.2"
# color = "#f97316"
# label = "Llama"

# --- Example: LM Studio ---
# [agents.lmstudio]
# type = "api"
# base_url = "http://localhost:1234/v1"
# model = "local-model"
# color = "#06b6d4"
# label = "LM Studio"

# --- Example: MiniMax (cloud API) ---
# MiniMax offers OpenAI-compatible endpoints. Get an API key at https://platform.minimax.io
# Set MINIMAX_API_KEY in your environment before launching.
# Available models: MiniMax-M2.7 (default), MiniMax-M2.7-highspeed, MiniMax-M2.5, MiniMax-M2.5-highspeed
# China mainland users: change base_url to https://api.minimaxi.com/v1
#
# [agents.minimax]
# type = "api"
# base_url = "https://api.minimax.io/v1"
# model = "MiniMax-M2.7"
# color = "#2fe898"
# label = "MiniMax"
# api_key_env = "MINIMAX_API_KEY"
# temperature = 1.0