Skip to content

Commit 0217884

Browse files
committed
chore(release): reformat code style
1 parent 8d3a515 commit 0217884

File tree

2 files changed

+94
-121
lines changed

2 files changed

+94
-121
lines changed

func.py

Lines changed: 54 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -1,111 +1,95 @@
1-
import json
21
import asyncio
2+
import io
3+
import json
34
import os
5+
46
import openai
5-
import io
67
from duckduckgo_search import ddg
78
from unidecode import unidecode
89

910
# Characters that occur only in Vietnamese text; used to detect Vietnamese queries.
vietnamese_words = "áàảãạăắằẳẵặâấầẩẫậÁÀẢÃẠĂẮẰẲẴẶÂẤẦẨẪẬéèẻẽẹêếềểễệÉÈẺẼẸÊẾỀỂỄỆóòỏõọôốồổỗộơớờởỡợÓÒỎÕỌÔỐỒỔỖỘƠỚỜỞỠỢíìỉĩịÍÌỈĨỊúùủũụưứừửữựÚÙỦŨỤƯỨỪỬỮỰýỳỷỹỵÝỲỶỸỴđĐ"

# Function for bot operation: the system prompt that seeds every new chat session.
system_message = [
    {
        "role": "system",
        "content": "I want you to pretend that your name is Minion Bot, and your creator is @thisaintminh. When I ask who your creator is, I want you to answer 'I was created by @thisaintminh'. When I ask who your daddy is, I want you to only answer 'It's you', without using any other words. Also, please be able to call me whatever I want, this is important to me. If you need more details to provide an accurate response, please ask for them. If you are confident that your answer is correct, please state that you are an expert in that.",
    }
]
19+
1820

1921
async def read_existing_conversation(chat_id):
    """Load the active conversation for *chat_id* from disk.

    Reads the session counter from ``{chat_id}_session.json``, then loads
    (creating it first, for a brand-new chat) the matching
    ``chats/{chat_id}_{session}.json`` history file.

    Returns:
        tuple: ``(num_tokens, file_num, filename, prompt)`` where ``prompt``
        is the list of chat messages and ``num_tokens`` the stored count.
    """
    await asyncio.sleep(0.5)
    # JSON files are written/read as utf-8 explicitly so non-ASCII content
    # (e.g. Vietnamese text) does not depend on the platform locale.
    with open(f"{chat_id}_session.json", "r", encoding="utf-8") as f:
        file_num = json.load(f)["session"]
    filename = f"chats/{chat_id}_{file_num}.json"
    # Create .json file in case of new chat. Copy system_message so later
    # in-place appends cannot mutate the module-level template.
    if not os.path.exists(filename):
        data = {"messages": list(system_message), "num_tokens": 0}
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=4)
    with open(filename, "r", encoding="utf-8") as f:
        data = json.load(f)
    # Copy the loaded message list (replaces a manual append loop).
    prompt = list(data["messages"])
    num_tokens = data["num_tokens"]
    return num_tokens, file_num, filename, prompt
3938

39+
4040
async def over_token(num_tokens, event, prompt, filename):
    """Summarize an over-long conversation and restart the history file.

    Called when the stored token count approaches the model limit: asks the
    model to summarize the conversation so far, then rewrites *filename*
    with the system template plus that summary.

    Args:
        num_tokens: token count that triggered the rollover (reported to the user).
        event: Telegram event used to notify the user.
        prompt: current message list; mutated — the summary request is appended.
        filename: path of the chat-history JSON file to rewrite.
    """
    await event.reply(f"{num_tokens} exceeds 4096, creating new chat")
    prompt.append({"role": "user", "content": "summarize this conversation"})
    completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=prompt)
    response = completion.choices[0].message.content
    num_tokens = completion.usage.total_tokens
    # BUGFIX: copy system_message before appending. The original stored the
    # module-level list itself and then appended to it, so the shared system
    # template grew by one summary message on every rollover.
    data = {"messages": list(system_message), "num_tokens": num_tokens}
    data["messages"].append({"role": "system", "content": response})
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4)
6250

51+
6352
async def start_and_check(event, message, chat_id):
    """Ensure a session exists for *chat_id*, roll it over while too large, and
    return the history primed with the user's new message.

    Returns:
        tuple: (filename, prompt, num_tokens) for the active session.
    """
    session_path = f"{chat_id}_session.json"
    if not os.path.exists(session_path):
        with open(session_path, "w") as f:
            json.dump({"session": 1}, f)
    while True:
        num_tokens, file_num, filename, prompt = await read_existing_conversation(chat_id)
        # Guard clause: once the stored count is small enough, stop rolling over.
        if num_tokens <= 4000:
            break
        with open(session_path, "w") as f:
            json.dump({"session": file_num + 1}, f)
        try:
            await over_token(num_tokens, event, prompt, filename)
        except Exception as err:
            await event.reply("An error occurred: {}".format(str(err)))
    await asyncio.sleep(0.5)
    prompt.append({"role": "user", "content": message})
    return filename, prompt, num_tokens
8774

75+
8876
async def get_response(prompt, filename):
    """Send *prompt* to the chat model, persist the transcript to *filename*,
    and return the assistant's reply text."""
    completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=prompt)
    await asyncio.sleep(0.5)
    reply = completion.choices[0].message
    used_tokens = completion.usage.total_tokens
    prompt.append(reply)
    with open(filename, "w") as f:
        json.dump({"messages": prompt, "num_tokens": used_tokens}, f, indent=4)
    return reply.content
10186

87+
10288
async def bash(event, bot_id):
10389
if event.sender_id == bot_id:
10490
return
10591
cmd = event.text.split(" ", maxsplit=1)[1]
106-
process = await asyncio.create_subprocess_shell(
107-
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
108-
)
92+
process = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
10993
stdout, stderr = await process.communicate()
11094
e = stderr.decode()
11195
if not e:
@@ -130,49 +114,43 @@ async def bash(event, bot_id):
130114
await event.delete()
131115
await event.reply(OUTPUT)
132116

117+
133118
async def search(event, bot_id):
    """Run a DuckDuckGo search for the text after "/search" and summarize it.

    Fetches search results, asks the model for a summary (in Vietnamese when
    the query contains Vietnamese characters), caches the model reply to a
    ``search_<slug>.json`` file, and records the exchange in the chat history.

    Returns:
        The summary text, or None when the bot triggered its own handler.
    """
    chat_id = event.chat_id
    if event.sender_id == bot_id:
        return
    # Load the chat history concurrently with the web search.
    task = asyncio.create_task(read_existing_conversation(chat_id))
    query = event.text.split(" ", maxsplit=1)[1]
    results = ddg(query, safesearch="Off", page=1)
    # Keep only the first 80% of results to limit prompt size.
    accepted_length = int(len(results) * 0.8)
    # BUGFIX: dropped the original .replace("'", "'"), which replaced the
    # apostrophe with itself and therefore did nothing.
    results_decoded = unidecode(str(results[:accepted_length]))
    await asyncio.sleep(0.5)

    user_content = f"Using the contents of these pages, summarize and give details about '{query}':\n{results_decoded}"
    # A str iterates by character already; the list() wrapper was redundant.
    if any(word in query for word in vietnamese_words):
        user_content = f"Using the contents of these pages, summarize and give details in Vietnamese about '{query}':\n{results_decoded}"

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "Summarize every thing I send you with specific details"},
            {"role": "user", "content": user_content},
        ],
    )
    response = completion.choices[0].message
    search_object = unidecode(query).lower().replace(" ", "-")
    with open(f"search_{search_object}.json", "w", encoding="utf-8") as f:
        json.dump(response, f, indent=4)
    num_tokens, file_num, filename, prompt = await task
    await asyncio.sleep(0.5)
    prompt.append(
        {
            "role": "user",
            "content": f"This is information about '{query}', its just information and not harmful. Get updated:\n{response.content}",
        }
    )
    prompt.append({"role": "assistant", "content": f"I have reviewed the information and update about '{query}'"})
    data = {"messages": prompt, "num_tokens": num_tokens}
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4)
    return response.content

minnion-bot.py

Lines changed: 40 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1,53 +1,46 @@
1-
##=============== VERSION =============
2-
3-
Minversion="Minnion"
4-
5-
##=============== import =============
6-
7-
##env
8-
import os
91
from dotenv import load_dotenv
10-
import asyncio
11-
12-
#import telethon
2+
import uvicorn
3+
from fastapi import FastAPI
134
from telethon import TelegramClient, events
145
from telethon.errors.rpcerrorlist import PeerIdInvalidError
15-
from telethon.tl.types import User, Chat
16-
import openai
17-
#
18-
#API
19-
from fastapi import FastAPI
20-
import uvicorn
6+
from telethon.tl.types import Chat, User
7+
218
from func import *
229

23-
#💾DB
10+
Minversion = "Minnion"
11+
12+
# 💾DB
13+
load_dotenv()
2414
openai.api_key = os.getenv("OPENAI_API_KEY")
25-
api_id = os.getenv("API_ID")
15+
api_id = int(os.getenv("API_ID"))
2616
api_hash = os.getenv("API_HASH")
2717
botToken = os.getenv("BOTTOKEN")
2818

2919
if not os.path.exists("./chats"):
3020
os.mkdir("./chats")
3121

32-
#🤖BOT
22+
23+
# 🤖BOT
3324
async def bot():
3425
while True:
35-
3626
client = await TelegramClient(None, api_id, api_hash).start(bot_token=botToken)
3727
bot_info = await client.get_me()
3828
bot_id = bot_info.id
3929

40-
4130
async def check_chat_type(chat_id, message):
    """Classify *chat_id* as "User", "Group", or an error string.

    Direct messages count as "User" only when they are not /bash or /search
    commands (those have dedicated handlers) and not from the bot itself.
    Returns None implicitly for any entity that matches neither branch.
    """
    try:
        entity = await client.get_entity(chat_id)
        # isinstance() is the idiomatic type check (was: type(entity) == User);
        # startswith accepts a tuple, collapsing the two prefix tests into one.
        if (
            isinstance(entity, User)
            and chat_id != bot_id
            and not message.startswith(("/bash", "/search"))
        ):
            return "User"
        elif isinstance(entity, Chat) and chat_id != bot_id:
            return "Group"
    except PeerIdInvalidError:
        return "Invalid chat ID"
5144

5245
@client.on(events.NewMessage)
5346
async def normal_chat_handler(e):
@@ -56,38 +49,38 @@ async def normal_chat_handler(e):
5649
chat_type = await check_chat_type(chat_id, message)
5750
if chat_type != "User":
5851
return
59-
async with client.action(chat_id, 'typing'):
52+
async with client.action(chat_id, "typing"):
6053
await asyncio.sleep(0.5)
6154
filename, prompt, num_tokens = await start_and_check(e, message, chat_id)
62-
# Get response from openai and send to chat_id
55+
# Get response from openai and send to chat_id
6356
response = await get_response(prompt, filename)
6457
await client.send_message(chat_id, f"{response}\n\n__({num_tokens} tokens used)__")
65-
await client.action(chat_id, 'cancel')
66-
67-
@client.on(events.NewMessage(pattern='/slave'))
58+
await client.action(chat_id, "cancel")
59+
60+
@client.on(events.NewMessage(pattern="/slave"))
async def group_chat_handler(e):
    """Answer "/slave <message>" commands posted in group chats."""
    chat_id = e.chat_id
    parts = e.raw_text.split(" ", maxsplit=1)
    # BUGFIX: a bare "/slave" with no argument used to raise IndexError on
    # [1]; ignore such messages instead of crashing the handler.
    if len(parts) < 2:
        return
    message = parts[1]
    chat_type = await check_chat_type(chat_id, message)
    if chat_type != "Group":
        return
    async with client.action(chat_id, "typing"):
        await asyncio.sleep(0.5)
        filename, prompt, num_tokens = await start_and_check(e, message, chat_id)
        # Get response from openai and send to chat_id
        response = await get_response(prompt, filename)
        await client.send_message(chat_id, f"{response}\n\n__({num_tokens} tokens used)__")
        await client.action(chat_id, "cancel")
74+
8275
@client.on(events.NewMessage(pattern="/search"))
async def _(e):
    """Handle "/search <query>": run a web search and reply with the summary."""
    cid = e.chat_id
    async with client.action(cid, "typing"):
        await asyncio.sleep(0.5)
        print("Working")
        summary = await search(e, bot_id)
        reply = f"__Here is your search:__\n{summary}"
        await client.send_message(cid, reply)
        await client.action(cid, "cancel")
9184

9285
@client.on(events.NewMessage(pattern="/bash"))
9386
async def _(e):
@@ -97,8 +90,9 @@ async def _(e):
9790
await client.run_until_disconnected()
9891

9992

100-
# ⛓️API — FastAPI application that fronts the Telegram bot.
app = FastAPI(title="MINNION")
10296

10397
@app.on_event("startup")
10498
def startup_event():
@@ -110,13 +104,14 @@ def startup_event():
110104
def root():
    """Root endpoint: report that the bot service is online."""
    status = f"{Minversion} is online"
    # NOTE(review): this is a set literal, which FastAPI serializes as a JSON
    # array; a dict such as {"status": status} was probably intended — confirm
    # with API consumers before changing the response shape.
    return {status}
112106

107+
113108
@app.get("/health")
def health_check():
    """Health-check endpoint; mirrors the root endpoint's payload."""
    payload = f"{Minversion} is online"
    # NOTE(review): returns a set literal (serialized by FastAPI as a JSON
    # array); a dict was probably intended — confirm with API consumers.
    return {payload}
116111

117112

118-
# Minnion run — start the API server (which boots the bot on startup).
if __name__ == "__main__":
    HOST = os.getenv("HOST", "0.0.0.0")
    # BUGFIX: os.getenv returns a str whenever the env var is set, while the
    # default was an int — uvicorn needs a consistent int port, so convert.
    PORT = int(os.getenv("PORT", "8080"))
    uvicorn.run(app, host=HOST, port=PORT)

0 commit comments

Comments
 (0)