Commit 09e2adc

Enias Cailliau committed

Add dev container and clean up repo

1 parent 3a93ae3 · commit 09e2adc

File tree

12 files changed: 342 additions & 211 deletions

.devcontainer/devcontainer.json

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/python
+{
+    "name": "Python 3",
+    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
+    // Note: Steamship currently requires Python 3.8
+    "image": "mcr.microsoft.com/devcontainers/python:0-3.8",
+
+    // Features to add to the dev container. More info: https://containers.dev/features.
+    // "features": {},
+
+    // Configure tool-specific properties.
+    "customizations": {
+        // Configure properties specific to VS Code.
+        "vscode": {
+            "settings": {},
+            "extensions": [
+                "streetsidesoftware.code-spell-checker"
+            ]
+        }
+    },
+
+    // Use 'postCreateCommand' to run commands after the container is created.
+    "postCreateCommand": "pip3 install -r requirements.txt"
+}

.vscode/launch.json

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+{
+    "version": "0.1.0",
+    "configurations": [
+        {
+            "name": "Run",
+            "type": "python",
+            "request": "launch",
+            "program": "${workspaceRoot}/main.py",
+            "console": "integratedTerminal",
+            "justMyCode": true,
+            "env": {
+                "PYTHONPATH": "${workspaceRoot}/src"
+            }
+        }
+    ]
+}

README.md

Lines changed: 9 additions & 17 deletions
@@ -1,25 +1,17 @@
-# Tutorial: Telegram chatbot with LangChain
+# : Deploying Multi-Modal LangChain Agents
 
-This project contains the necessary scaffolding to deploy LangChain conversation agents with memory and connect them to Telegram.
+This project contains the necessary scaffolding to deploy LangChain conversation agents with memory and connect them to
+Telegram.
 
-These 4 steps should get you online. If not, shoot me a message on [Discord](https://steamship.com/discord). Happy to help you out.
+These 4 steps should get you online. If not, shoot me a message on [Discord](https://steamship.com/discord). Happy to
+help you out.
 
+Let's go:
 
-Let's go:
+> Step 1: Just copy and paste your LangChain agent into `src/chatbot/get_agent`
 
-> Step 1: Just copy paste your LangChain conversation LLMChain into `src/chatbot/get_chatbot`
 
+> Step 2: Pip-install the latest `steamship_langchain`: `pip install --upgrade steamship_langchain`
 
-> Step 2: Add your telegram bot access token under `BOT_TOKEN` in `src/chatbot.py`. More info [here](docs/register-telegram-bot.md)
 
-
-> Step 3: Pip install the latest `steamship_langchain`: `pip install --upgrade steamship_langchain`
-
-
-> Step 4: Run `python deploy.py`
-
-
-## Variations
-
-Examples of this package:
-* Gym Bro with long-term memory: https://github.com/steamship-packages/langchain-telegram-chatbot/tree/ec/gym-bro
+> Step 3: Run `python deploy.py`
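
Step 1 above asks for a LangChain agent, but this commit deletes the old example (`src/agent/get_agent.py`, further down) and replaces it with an abstract `get_agent` hook on the new `LangChainAgentBot` base class in `src/agent/base.py`. For orientation, here is a minimal, hypothetical sketch of such a factory. It assumes LangChain's `initialize_agent` API and an `OPENAI_API_KEY` in the environment; the empty tool list and the memory choice are placeholders, not part of this commit.

```python
# Hypothetical sketch -- not part of this commit.
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory


def get_agent(chat_id: str) -> AgentExecutor:
    """Build a conversational agent; swap in your own tools, LLM, and memory."""
    llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
    # A per-chat memory would normally be keyed off chat_id; kept simple here.
    memory = ConversationBufferMemory(memory_key="chat_history")
    return initialize_agent(
        tools=[],  # add LangChain tools here (search, image generation, ...)
        llm=llm,
        agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
        memory=memory,
        verbose=True,
    )
```

The new base class's `create_response` calls this kind of factory once per incoming chat and then splits the agent's answer into text and image-block messages (see `src/agent/base.py` below).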

deploy.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
     package_handle=manifest.handle,
     version=manifest.version,
     instance_handle=f"{manifest.handle}-{manifest.version.replace('.', '-')}",
-    config={"bot_token": "5629695237:AAFwmYgYRIV1tyPSBEhdYhuQMPVFu_dliAA"},
+    config={"bot_token": "6140681319:AAFqNDOs68qROhCxUO8qOhR8V0IEr5k5vb8"},
 )
 
 bot.wait_for_init()
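
The hunk above shows only the changed keyword arguments. For context, a hedged reconstruction of the call they presumably sit in, assuming Steamship's `Steamship.use` factory; the `Manifest` stand-in, handle, and version below are hypothetical, since the rest of `deploy.py` is not shown in this diff.

```python
# Hedged reconstruction -- deploy.py is only partially visible in this diff.
from steamship import Steamship


class Manifest:
    """Stand-in for the package manifest that deploy.py presumably loads."""

    handle = "my-telegram-bot"  # hypothetical package handle
    version = "1.0.0"  # hypothetical version


manifest = Manifest()

bot = Steamship.use(
    package_handle=manifest.handle,
    version=manifest.version,
    instance_handle=f"{manifest.handle}-{manifest.version.replace('.', '-')}",
    config={"bot_token": "<your-telegram-bot-token>"},  # keep real tokens out of source control
)

bot.wait_for_init()
```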

main.py

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
+import logging
+import sys
+from functools import partial
+from typing import List
+
+from steamship.experimental.transports.chat import ChatMessage
+
+from api import LangChainTelegramChatbot
+
+sys.path.insert(0, "src")
+from steamship import Steamship, SteamshipError
+from steamship.cli.ship_spinner import ship_spinner
+from termcolor import colored
+
+
+def show_results(response_messages: List[ChatMessage]):
+    print(colored("\nResults: ", "blue", attrs=["bold"]))
+    for message in response_messages:
+        if message.mime_type and message.mime_type.startswith("image"):
+            print(message.url, end="\n\n")
+        else:
+            print(message.text, end="\n\n")
+
+
+class LoggingDisabled:
+    """Context manager that turns off logging within context."""
+
+    def __enter__(self):
+        logging.disable(logging.CRITICAL)
+
+    def __exit__(self, exit_type, exit_value, exit_traceback):
+        logging.disable(logging.NOTSET)
+
+
+def main():
+    Steamship()
+
+    with Steamship.temporary_workspace() as client:
+        run = partial(run_agent, agent=LangChainTelegramChatbot(client=client, config={"bot_token": "test"}))
+        print(f"Starting Agent...")
+
+        print(
+            f"If you make code changes, you will need to restart this client. Press CTRL+C to exit at any time.\n"
+        )
+
+        count = 1
+
+        while True:
+            print(f"----- Agent Run {count} -----")
+            prompt = input(colored(f"Prompt: ", "blue"))
+            run(
+                # client,
+                prompt=prompt,
+            )
+            count += 1
+
+
+def run_agent(agent, prompt: str, as_api: bool = False) -> None:
+    # For Debugging
+    if not agent.is_verbose_logging_enabled():  # display progress when verbose is False
+        print("Running: ", end="")
+        with ship_spinner():
+            response = agent.create_response(incoming_message=ChatMessage(text=prompt, chat_id="123"))
+    else:
+        response = agent.create_response(incoming_message=ChatMessage(text=prompt, chat_id="123"))
+
+    show_results(response)
+
+
+if __name__ == "__main__":
+    # when running locally, we can use print statements to capture logs / info.
+    # as a result, we will disable python logging to run. this will keep the output cleaner.
+    with LoggingDisabled():
+        try:
+            main()
+        except SteamshipError as e:
+            print(colored("Aborting! ", "red"), end="")
+            print(f"There was an error encountered when running: {e}")

requirements.txt

Lines changed: 2 additions & 1 deletion
@@ -1,3 +1,4 @@
 steamship_langchain==0.0.20rc2
 pytimeparse
-steamship==2.16.10rc1
+steamship==2.16.10rc1
+termcolor

src/agent/base.py

Lines changed: 93 additions & 0 deletions
@@ -0,0 +1,93 @@
+"""Define your LangChain chatbot."""
+import re
+from abc import abstractmethod
+from typing import List, Optional
+
+from langchain.agents import AgentExecutor
+from steamship import Block
+from steamship.experimental.package_starters.telegram_bot import TelegramBot
+from steamship.experimental.transports.chat import ChatMessage
+from steamship.invocable import post
+
+from agent.utils import is_valid_uuid, make_image_public, UUID_PATTERN
+
+
+class LangChainAgentBot(TelegramBot):
+
+    @abstractmethod
+    def get_agent(self, chat_id: str) -> AgentExecutor:
+        raise NotImplementedError()
+
+    def is_verbose_logging_enabled(self):
+        return True
+
+    @post("send_message")
+    def send_message(self, message: str, chat_id: str) -> str:
+        """Send a message to Telegram.
+
+        Note: This is a private endpoint that requires authentication."""
+        self.telegram_transport.send([ChatMessage(text=message, chat_id=chat_id)])
+        return "ok"
+
+    def _invoke_later(self, delay_ms: int, message: str, chat_id: str):
+        self.invoke_later(
+            "send_message",
+            delay_ms=delay_ms,
+            arguments={
+                "message": message,
+                "chat_id": chat_id,
+            },
+        )
+
+    def create_response(
+        self, incoming_message: ChatMessage
+    ) -> Optional[List[ChatMessage]]:
+        """Use the LLM to prepare the next response by appending the user input to the file and then generating."""
+        if incoming_message.text == "/start":
+            return [
+                ChatMessage(
+                    text="New conversation started.",
+                    chat_id=incoming_message.get_chat_id(),
+                )
+            ]
+
+        conversation = self.get_agent(
+            chat_id=incoming_message.get_chat_id(),
+        )
+        response = conversation.run(input=incoming_message.text)
+        response = UUID_PATTERN.split(response)
+        response = [re.sub(r"^\W+", "", el) for el in response]
+        return self.agent_output_to_chat_messages(
+            chat_id=incoming_message.get_chat_id(), agent_output=response
+        )
+
+    def agent_output_to_chat_messages(
+        self, chat_id: str, agent_output: List[str]
+    ) -> List[ChatMessage]:
+        """Transform the output of the Multi-Modal Agent into a list of ChatMessage objects.
+
+        The response of a Multi-Modal Agent contains one or more:
+        - parseable UUIDs, representing a block containing binary data, or:
+        - Text
+
+        This method inspects each string and creates a ChatMessage of the appropriate type.
+        """
+        ret = []
+        for part_response in agent_output:
+            if is_valid_uuid(part_response):
+                block = Block.get(self.client, _id=part_response)
+                message = ChatMessage.from_block(
+                    block,
+                    chat_id=chat_id,
+                )
+                message.url = make_image_public(self.client, block)
+
+            else:
+                message = ChatMessage(
+                    client=self.client,
+                    chat_id=chat_id,
+                    text=part_response,
+                )
+
+            ret.append(message)
+        return ret
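
`src/agent/base.py` imports `is_valid_uuid`, `make_image_public`, and `UUID_PATTERN` from `agent.utils`, a module that is not part of this diff. A minimal sketch of what the first two plausibly look like; `make_image_public` is left as a stub because its body depends on Steamship's file/signed-URL API, which this commit does not show.

```python
# Sketch of the assumed agent/utils.py helpers -- not shown in this commit.
import re
import uuid

from steamship import Block, Steamship

# Matches the UUIDs that the multi-modal agent embeds in its text output;
# the capturing group keeps them in the result of UUID_PATTERN.split(...).
UUID_PATTERN = re.compile(
    r"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})", re.IGNORECASE
)


def is_valid_uuid(candidate: str) -> bool:
    """Return True if the whole string parses as a UUID."""
    try:
        uuid.UUID(candidate)
        return True
    except ValueError:
        return False


def make_image_public(client: Steamship, block: Block) -> str:
    """Return a publicly reachable URL for the image stored in `block`.

    The real helper depends on Steamship's signed-URL API, which is not
    shown in this diff; this stub only records the expected contract.
    """
    raise NotImplementedError("Depends on Steamship's signed-URL API")
```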

src/agent/get_agent.py

Lines changed: 0 additions & 60 deletions
This file was deleted.

src/agent/parser.py

Lines changed: 0 additions & 49 deletions
This file was deleted.
