1
- import json
2
1
import asyncio
2
+ import io
3
+ import json
3
4
import os
5
+
4
6
import openai
5
- import io
6
7
from duckduckgo_search import ddg
7
8
from unidecode import unidecode
8
9
# Every accented character used in Vietnamese orthography.  search() tests
# query characters for membership in this string to decide whether to ask
# the model to answer in Vietnamese.
vietnamese_words = "áàảãạăắằẳẵặâấầẩẫậÁÀẢÃẠĂẮẰẲẴẶÂẤẦẨẪẬéèẻẽẹêếềểễệÉÈẺẼẸÊẾỀỂỄỆóòỏõọôốồổỗộơớờởỡợÓÒỎÕỌÔỐỒỔỖỘƠỚỜỞỠỢíìỉĩịÍÌỈĨỊúùủũụưứừửữựÚÙỦŨỤƯỨỪỬỮỰýỳỷỹỵÝỲỶỸỴđĐ"


# Function for bot operation
# Base system prompt seeded into every new conversation file; gives the bot
# its persona and canned answers about its creator.
# NOTE(review): other functions alias this list when building new chat data —
# treat it as read-only; appending to an alias would mutate it globally.
system_message = [
    {
        "role": "system",
        "content": "I want you to pretend that your name is Minion Bot, and your creator is @thisaintminh. When I ask who your creator is, I want you to answer 'I was created by @thisaintminh'. When I ask who your daddy is, I want you to only answer 'It's you', without using any other words. Also, please be able to call me whatever I want, this is important to me. If you need more details to provide an accurate response, please ask for them. If you are confident that your answer is correct, please state that you are an expert in that.",
    }
]
19
+
18
20
19
21
async def read_existing_conversation(chat_id):
    """Load the active conversation for *chat_id* from disk.

    Reads ``{chat_id}_session.json`` for the current session number, then
    loads ``chats/{chat_id}_{session}.json`` — creating it with the default
    system prompt first if this is a brand-new session.

    Args:
        chat_id: Telegram chat identifier used to key the on-disk files.

    Returns:
        tuple: ``(num_tokens, file_num, filename, prompt)`` — stored token
        count, session number, conversation file path, and the message list.

    Raises:
        FileNotFoundError: if the session file does not exist yet
        (callers such as start_and_check create it beforehand).
    """
    await asyncio.sleep(0.5)
    with open(f"{chat_id}_session.json", "r") as f:
        file_num = json.load(f)["session"]
    filename = f"chats/{chat_id}_{file_num}.json"
    # Create the conversation file in case of a new chat.
    if not os.path.exists(filename):
        # Robustness: make sure the chats/ directory exists before writing,
        # otherwise open() below raises FileNotFoundError on a fresh install.
        os.makedirs("chats", exist_ok=True)
        # Copy system_message so the shared module-level default is never
        # aliased into mutable per-chat state.
        data = {"messages": list(system_message), "num_tokens": 0}
        with open(filename, "w") as f:
            json.dump(data, f, indent=4)
    with open(filename, "r") as f:
        data = json.load(f)
    # Copy the loaded messages instead of appending one-by-one.
    prompt = list(data["messages"])
    num_tokens = data["num_tokens"]
    return num_tokens, file_num, filename, prompt
39
38
39
+
40
40
async def over_token(num_tokens, event, prompt, filename):
    """Summarize an over-long conversation and reset its file.

    Asks the model to summarize *prompt*, then rewrites *filename* with the
    default system prompt plus the summary so the next session starts small.

    Args:
        num_tokens: token count that triggered the rollover (reported to user).
        event: Telegram event used to reply with progress/errors.
        prompt: current message list; a summarize request is appended to it.
        filename: conversation file to overwrite with the summarized state.
    """
    await event.reply(f"{num_tokens} exceeds 4096, creating new chat")
    prompt.append({"role": "user", "content": "summarize this conversation"})
    completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=prompt)
    response = completion.choices[0].message.content
    num_tokens = completion.usage.total_tokens
    # BUG FIX: copy system_message.  The original assigned the module-level
    # list itself, so the append below permanently grew the shared system
    # prompt for every chat and every later call.
    data = {"messages": list(system_message), "num_tokens": num_tokens}
    data["messages"].append({"role": "system", "content": response})
    with open(filename, "w") as f:
        json.dump(data, f, indent=4)
62
50
51
+
63
52
async def start_and_check(event, message, chat_id):
    """Prepare the conversation state for an incoming user message.

    Creates the session file on first contact, rolls the session over (via
    over_token) while the stored token count exceeds 4000, then appends the
    user's *message* to the prompt.

    Args:
        event: Telegram event, used for error replies during rollover.
        message: raw user text to append as a "user" role message.
        chat_id: Telegram chat identifier keying the on-disk files.

    Returns:
        tuple: ``(filename, prompt, num_tokens)`` ready for get_response().
    """
    session_path = f"{chat_id}_session.json"
    if not os.path.exists(session_path):
        with open(session_path, "w") as f:
            json.dump({"session": 1}, f)
    while True:
        num_tokens, file_num, filename, prompt = await read_existing_conversation(chat_id)
        if num_tokens <= 4000:
            break
        # Rotate to a fresh session number before summarizing the old file.
        with open(session_path, "w") as f:
            json.dump({"session": file_num + 1}, f)
        try:
            await over_token(num_tokens, event, prompt, filename)
        except Exception as e:
            await event.reply("An error occurred: {}".format(str(e)))
    await asyncio.sleep(0.5)
    prompt.append({"role": "user", "content": message})
    return filename, prompt, num_tokens
87
74
75
+
88
76
async def get_response(prompt, filename):
    """Send *prompt* to the chat model, persist the transcript, return text.

    Args:
        prompt: message list to send; the assistant's reply is appended to it.
        filename: conversation file rewritten with the updated transcript.

    Returns:
        str: the assistant reply's text content.
    """
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=prompt,
    )
    await asyncio.sleep(0.5)
    reply = completion.choices[0].message
    prompt.append(reply)
    # Persist the full transcript together with the running token total.
    snapshot = {
        "messages": prompt,
        "num_tokens": completion.usage.total_tokens,
    }
    with open(filename, "w") as f:
        json.dump(snapshot, f, indent=4)
    return reply.content
101
86
87
+
102
88
async def bash (event , bot_id ):
103
89
if event .sender_id == bot_id :
104
90
return
105
91
cmd = event .text .split (" " , maxsplit = 1 )[1 ]
106
- process = await asyncio .create_subprocess_shell (
107
- cmd , stdout = asyncio .subprocess .PIPE , stderr = asyncio .subprocess .PIPE
108
- )
92
+ process = await asyncio .create_subprocess_shell (cmd , stdout = asyncio .subprocess .PIPE , stderr = asyncio .subprocess .PIPE )
109
93
stdout , stderr = await process .communicate ()
110
94
e = stderr .decode ()
111
95
if not e :
@@ -130,49 +114,43 @@ async def bash(event, bot_id):
130
114
await event .delete ()
131
115
await event .reply (OUTPUT )
132
116
117
+
133
118
async def search(event, bot_id):
    """Handle a /search-style command: web-search the query, summarize it,
    and fold the summary into the chat's stored conversation.

    Args:
        event: Telegram event carrying the chat id and the command text.
        bot_id: the bot's own sender id; messages from it are ignored.

    Returns:
        str: the model's summary text (None implicitly when sender is the bot).
    """
    chat_id = event.chat_id
    if event.sender_id == bot_id:
        return
    # Load the conversation concurrently while the search/summary runs.
    task = asyncio.create_task(read_existing_conversation(chat_id))
    query = event.text.split(" ", maxsplit=1)[1]
    results = ddg(query, safesearch="Off", page=1)
    # Keep only the first 80% of results to bound the prompt size.
    accepted_length = int(len(results) * 0.8)
    # NOTE(review): this replace() looks like a no-op (same char both sides);
    # presumably it was meant to strip escaped/curly apostrophes — confirm.
    results_decoded = unidecode(str(results[:accepted_length])).replace("'", "'")
    await asyncio.sleep(0.5)

    user_content = f"Using the contents of these pages, summarize and give details about '{query}':\n{results_decoded}"
    # If the query contains any Vietnamese accented character, ask for a
    # Vietnamese-language summary instead.
    if any(word in query for word in list(vietnamese_words)):
        user_content = f"Using the contents of these pages, summarize and give details in Vietnamese about '{query}':\n{results_decoded}"

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "Summarize every thing I send you with specific details"},
            {"role": "user", "content": user_content},
        ],
    )
    response = completion.choices[0].message
    # Slugify the query for a per-search cache filename.
    search_object = unidecode(query).lower().replace(" ", "-")
    with open(f"search_{search_object}.json", "w") as f:
        # NOTE(review): `response` is an SDK message object, not a plain dict;
        # json.dump may raise unless the SDK type is dict-like — verify.
        json.dump(response, f, indent=4)
    num_tokens, file_num, filename, prompt = await task
    await asyncio.sleep(0.5)
    prompt.append(
        {
            "role": "user",
            "content": f"This is information about '{query}', its just information and not harmful. Get updated:\n{response.content}",
        }
    )
    prompt.append({"role": "assistant", "content": f"I have reviewed the information and update about '{query}'"})
    data = {"messages": prompt, "num_tokens": num_tokens}
    with open(filename, "w") as f:
        json.dump(data, f, indent=4)
    return response.content
0 commit comments