
Commit ded2bd7

authored
Add files via upload
1 parent 5d9fb40 commit ded2bd7

4 files changed: 127 additions, 0 deletions


Transformers/Clear_chat.py

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load a pretrained model and tokenizer
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Chatbot interaction loop
print("AI Chatbot ready! Type 'quit' to exit.")

while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        print("Goodbye!")
        break

    # Encode the user input
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')

    # Generate a response
    response_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    print(f"Bot: {response}")
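Clear_chat.py encodes only the latest user message, so each reply is generated with no conversational context and greedy decoding. A minimal sketch of the same generation step with sampling enabled and an explicit attention mask (a variation, not part of this commit; the do_sample/top_k/top_p values are illustrative):

# Sketch (not part of this commit): Clear_chat.py's generation step with sampling
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

user_input = "Hello there!"
encoded = tokenizer(user_input + tokenizer.eos_token, return_tensors='pt')

# Sampling makes replies less repetitive than greedy decoding; the explicit
# attention_mask avoids ambiguity when pad_token_id is set to eos_token_id.
response_ids = model.generate(
    encoded['input_ids'],
    attention_mask=encoded['attention_mask'],
    max_length=1000,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    top_k=50,
    top_p=0.95,
)
response = tokenizer.decode(
    response_ids[:, encoded['input_ids'].shape[-1]:][0],
    skip_special_tokens=True,
)
print(f"Bot: {response}")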

Transformers/Conversational_Model.py

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
import openai

openai.api_key = 'your-api-key'

def generate_response(messages):
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=150
    )
    return response['choices'][0]['message']['content']

# Chatbot interaction loop
print("AI Chatbot ready! Type 'quit' to exit.")
chat_history = []

while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        print("Goodbye!")
        break

    # Add user input to chat history
    chat_history.append({"role": "user", "content": user_input})

    # Generate a response
    response = generate_response(chat_history)

    # Add bot response to chat history
    chat_history.append({"role": "assistant", "content": response})

    print(f"Bot: {response}")
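Conversational_Model.py targets the legacy openai.ChatCompletion interface, which only works with openai<1.0. A rough equivalent of generate_response for the client-based SDK (openai>=1.0) might look like the following sketch; the API key handling is illustrative:

# Sketch (not part of this commit): generate_response with the openai>=1.0 client API
from openai import OpenAI

client = OpenAI(api_key="your-api-key")  # or rely on the OPENAI_API_KEY environment variable

def generate_response(messages):
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=150,
    )
    return response.choices[0].message.content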

Transformers/Limit_chat.py

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load a pretrained model and tokenizer
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Chatbot interaction loop
print("AI Chatbot ready! Type 'quit' to exit.")
chat_history = []

while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        print("Goodbye!")
        break

    # Add user input to chat history, limit size to the last 3 exchanges
    chat_history.append(user_input)
    if len(chat_history) > 6:  # Each exchange includes both user and bot messages
        chat_history = chat_history[-6:]

    # Join the chat history and encode it
    input_ids = tokenizer.encode(" ".join(chat_history) + tokenizer.eos_token, return_tensors='pt')

    # Generate a response
    response_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    # Add bot response to chat history
    chat_history.append(response)

    print(f"Bot: {response}")
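Limit_chat.py caps the history at the last six messages (three exchanges), but what actually constrains the model is the number of tokens in the encoded prompt. A sketch of truncating by tokens instead of messages (not part of this commit; MAX_HISTORY_TOKENS and the example history are illustrative, and DialoGPT inherits GPT-2's 1024-token context window):

# Sketch (not part of this commit): truncate the encoded history by tokens
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

MAX_HISTORY_TOKENS = 512  # illustrative budget, well under the 1024-token context window

chat_history = ["Hi there!", "Hello! How can I help?", "Tell me a joke."]
input_ids = tokenizer.encode(" ".join(chat_history) + tokenizer.eos_token, return_tensors='pt')

# Keep only the most recent tokens if the encoded history grows too long
if input_ids.shape[-1] > MAX_HISTORY_TOKENS:
    input_ids = input_ids[:, -MAX_HISTORY_TOKENS:]

response_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True))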

Transformers/bot.py

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load a pretrained model and tokenizer
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Chatbot interaction loop
print("AI Chatbot ready! Type 'quit' to exit.")
chat_history = []

while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        print("Goodbye!")
        break

    # Add user input to chat history
    chat_history.append(user_input)

    # Join the chat history and encode it as input for the model
    input_ids = tokenizer.encode(" ".join(chat_history) + tokenizer.eos_token, return_tensors='pt')

    # Generate a response
    response_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

    # Add bot response to chat history
    chat_history.append(response)

    print(f"Bot: {response}")
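bot.py joins history turns with spaces before re-encoding, so turn boundaries are not visible to the model. The DialoGPT model card instead separates turns with the EOS token and carries the generated ids forward; a minimal sketch of that pattern (a variation, not part of this commit; the sample inputs are illustrative):

# Sketch (not part of this commit): EOS-separated history, following the DialoGPT model card pattern
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

chat_history_ids = None
for user_input in ["Hello!", "How are you today?"]:
    # Encode the new turn and terminate it with the EOS token
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')

    # Append to the running history of token ids instead of joining raw strings
    input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)

    chat_history_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    print(f"Bot: {response}")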
