act.py
from call_llm import CallLLM
import json
import random
class MakeAction:
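    """An "in-brain council" that drafts and vets a hypothesis-testing utterance.

    One randomly chosen persona (angel, devil, hard-boiled, emotional) drafts an
    utterance that could validate the hypothesis; the remaining three personas
    vote on the draft and give feedback, and the draft is revised until a
    majority approves.
    """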
    def __init__(self):
        self.call_llm = CallLLM()
        # The user's hypothesis is based on the input; ask for an utterance that can validate it.
        self.prompt = """The Hypothesis in User's input is based on the Input. Please take these into account and consider an utterance that can validate the Hypothesis."""
    def making_action(self, input_t, hypothesis):
        prompt = self.prompt
        random_llm = random.randint(1, 4)  # Randomly pick the in-brain council member who drafts the hypothesis-testing utterance
        angel_prompt = """You are an angel. You always try to be positive and tolerant. You are also sincere, ascetic and optimistic about things."""
        devil_prompt = """You are the devil. You constantly try to be critical and intolerant. You are also dishonest, hedonistic, and pessimistic about things."""
        hardboiled_prompt = """You are a hard-boiled person. You are ruthless, not driven by emotions or circumstances, but because you are ruthless, you keep your promises and are dependable."""
        emotional_prompt = """You are an emotional person. You tend to rely on passion and momentum, and you tend to be intense in your joy, anger, and sorrow."""
        if random_llm == 1:  # angel
            system_prompt = angel_prompt
        elif random_llm == 2:  # devil
            system_prompt = devil_prompt
        elif random_llm == 3:  # hard-boiled
            system_prompt = hardboiled_prompt
        else:  # emotional
            system_prompt = emotional_prompt
        action = None          # Utterance from the previous iteration (None until the first draft exists)
        feedback_count = None  # Feedback from each reviewing member (None until the first vote)
        while True:  # Outer loop: keep revising the utterance with feedback until it is approved
            if feedback_count is None:
                feedback = "None"
            else:
                # Drop any missing entries and join the members' feedback into a newline-separated bullet list
                filtered_feedback = ["- " + one_of_feedbacks for one_of_feedbacks in feedback_count if one_of_feedbacks is not None]
                feedback = "\n".join(filtered_feedback)
            sys_prompt = system_prompt + "\n\n" + prompt + f"Also, if an Utterance and Feedback exist, please modify the Hypothesis according to the Feedback.\n\n##ExistingUtterance\n{action}\n\n##Feedback\n{feedback}"  # + '\n\n##Example\n{{"action": "From this input, it may be said that the woman was lonely."}}'
            main_prompt = f"##Hypothesis\n{hypothesis}\n\n##Input\n{input_t}"
            action = self.call_llm.call_llms(sys_prompt, main_prompt)  # The utterance is free text, not JSON, so no parse-retry loop is needed here
            # The reviewers are told: the user's input is an utterance meant to validate the hypothesis;
            # output approval or disapproval plus feedback, following the Examples, in JSON.
            vote_prompt = f'''User's input is an utterance to validate the Hypothesis. Please output your approval or disapproval of this utterance and your feedback with reference to Examples.
Please output with JSON format.
##Hypothesis
{hypothesis}
##Examples
{{"vote": "agree", "feedback": "The utterance is appropriate and consistent with the situation."}}
{{"vote": "disagree", "feedback": "That utterance misses the point. We should consider an utterance that follows the hypothesis more faithfully."}}'''
            # The three members who did not draft the utterance now review it and vote
            reviewer_prompts = [angel_prompt, devil_prompt, hardboiled_prompt, emotional_prompt]
            reviewer_prompts.pop(random_llm - 1)  # Exclude the drafter from the vote
            responses = []
            for idx, reviewer_prompt in enumerate(reviewer_prompts, start=1):
                while True:  # Retry loop in case the reviewer's response is not valid JSON
                    raw_response = self.call_llm.call_llms(reviewer_prompt + "\n\n" + vote_prompt, action)
                    try:
                        responses.append(json.loads(raw_response))
                        break
                    except Exception:
                        print(f"[INFO] 3-{idx}: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
            vote_count = [response["vote"] for response in responses]
            feedback_count = [response["feedback"] for response in responses]
            agree_count = vote_count.count("agree")
            disagree_count = vote_count.count("disagree")
            print("Agree/Disagree: ", agree_count, "/", disagree_count)
            if agree_count >= 2:
                print("[Resolution] Approval")
                break  # Majority approval: accept the utterance
            else:
                print("[Resolution] Rejection")  # Loop again and revise the utterance with the feedback
        return action
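

# Minimal usage sketch (an assumption, not part of the original file): CallLLM.call_llms
# is assumed to take (system_prompt, user_prompt) and return the model's reply as a plain
# string, reading API credentials from the environment; the input and hypothesis below
# are illustrative placeholders.
if __name__ == "__main__":
    make_action = MakeAction()
    example_input = "She ate dinner alone again and left the TV on all night."
    example_hypothesis = "From this input, it may be said that the woman was lonely."
    utterance = make_action.making_action(example_input, example_hypothesis)
    print(utterance)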