# hypothesis.py
from call_llm import CallLLM
import json
import random


class MakeHypothesis:
    def __init__(self):
        self.call_llm = CallLLM()
        self.prompt = '''The following options are given as tools for generating a hypothesis.
Please select the tool you need to make a hypothesis about the situation inferred from the User's input, and output it in JSON format referring to the Example.
- Lateral thinking: Generate intuitive ideas by looking at things from a variety of perspectives.
- Linear thinking: Focus on one thing to infer causal relationships.
- Critical thinking: Examine things and information from diverse angles and understand them logically and objectively, rather than accepting them uncritically. Examine the thoughts of oneself and others without assuming that one's own beliefs are correct. Think meta-cognitively, from one level higher standpoint.
- Integrated thinking: See and think about things from short-, medium-, and long-term perspectives.
##Example
{"tool": "Lateral thinking"}'''
    def making_thinking_tool(self, input_t):
        prompt = self.prompt
        while True:  # Retry until the thinking-tool selection parses as JSON
            response = self.call_llm.call_llms(prompt, input_t)
            try:
                response = json.loads(response)
                break
            except Exception:
                print("[INFO] 1: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
        if response["tool"] == "Lateral thinking":  # When lateral thinking is selected, pick a concrete technique
            lateral_prompt = '''The following options are given as tools for generating a hypothesis.
Please select the tool you need to make a hypothesis about the situation inferred from the User's input, and output it in JSON format referring to the Example.
- Random Idea method: A random selection of things (or a random selection of nouns from a dictionary) is used to expand the idea by associating it with an area of interest.
- Stimulating Ideas: This method involves making a list of things that you wish were this way, exaggerating certain parts, reversing, eliminating, or combining with other things, etc., and then selecting the most outlandish of these ideas as the basis for a new idea.
- Challenging Ideas: This is a method of generating new ideas by considering why something exists or why it is the way it is.
- Conceptual Diffusion Ideation: This method generates ideas by considering whether a concept can be applied broadly to other things.
- Rebuttal Ideation: Generating ideas by questioning what is considered obvious and needless to say, and attempting to disprove it persuasively, treating widely supported ideas as potentially wrong.
##Example
{"tool": "Stimulating Ideas"}'''
            while True:  # Retry until the lateral-thinking selection parses as JSON
                response = self.call_llm.call_llms(lateral_prompt, input_t)
                try:
                    response = json.loads(response)
                    break
                except Exception:
                    print("[INFO] 1-2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
        return response["tool"]
    def making_hypothesis(self, input_t, tool):
        random_llm = random.randint(1, 4)  # Randomly pick the brainstorming-council member who proposes a hypothesis using the selected thinking tool
        angel_prompt = """You are an angel. You always try to be positive and tolerant. You are also sincere, ascetic and optimistic about things."""
        devil_prompt = """You are the devil. You constantly try to be critical and intolerant. You are also dishonest, hedonistic, and pessimistic about things."""
        hardboiled_prompt = """You are a hard-boiled person. You are ruthless, not driven by emotions or circumstances, but because you are ruthless, you keep your promises and are dependable."""
        emotional_prompt = """You are an emotional person. You tend to rely on passion and momentum, and you tend to be intense in your joy, anger, and sorrow."""
        if random_llm == 1:  # Angel
            system_prompt = angel_prompt
        elif random_llm == 2:  # Devil
            system_prompt = devil_prompt
        elif random_llm == 3:  # Hard-boiled
            system_prompt = hardboiled_prompt
        else:  # Emotional
            system_prompt = emotional_prompt
        if tool == "Random Idea method":
            prompt = """From the User's input, please select a thing at random (or look up a dictionary and select a noun at random) and expand your ideas into a hypothesis in relation to your area of interest."""
        elif tool == "Stimulating Ideas":
            prompt = """From the User's input, please make a list of what you would like it to be like, what would happen if you exaggerated certain parts, reversed it, eliminated it, or combined it with something else, and choose the most outlandish of these to form a hypothesis."""
        elif tool == "Challenging Ideas":
            prompt = """Please think about why it exists or what it is for, and formulate a hypothesis for the User's input."""
        elif tool == "Conceptual Diffusion Ideation":
            prompt = """For the User's input, please consider whether this concept can be applied broadly to other things and formulate a hypothesis."""
        elif tool == "Rebuttal Ideation":
            prompt = """For the User's input, please formulate a hypothesis by questioning seemingly obvious assumptions, treating widely held beliefs as potentially wrong, and attempting to convincingly disprove them."""
        elif tool == "Linear thinking":
            prompt = """For the User's input, please focus on one thing and make a hypothesis by inferring a causal relationship."""
        elif tool == "Critical thinking":
            prompt = """Please do not uncritically accept things and information in response to the User's input; rather, consider them from various angles, understand them logically and objectively, and formulate a hypothesis by examining them meta-cognitively from one level higher standpoint."""
        elif tool == "Integrated thinking":
            prompt = """Please make a hypothesis by looking at things from short-, medium-, and long-term perspectives for the User's input."""
        hypothesis = None  # Holds the previous hypothesis once the members have given feedback
        feedback_count = []  # Feedback from the three reviewing members
        while True:  # Overall loop: keep revising on feedback until the hypothesis is approved
            if not feedback_count:
                feedback = "None"
            else:
                # Join each member's feedback into a newline-separated bullet list
                feedback = "\n".join("- " + one_of_feedbacks for one_of_feedbacks in feedback_count if one_of_feedbacks is not None)
            while True:  # Retry until the proposer returns a parseable hypothesis
                sys_prompt = (
                    system_prompt + "\n\n" + prompt
                    + f"\n\nAlso, if a Hypothesis and Feedback exist, please modify the Hypothesis according to the Feedback.\n\nPlease output in JSON format referring to the Example.\n\n##ExistingHypothesis\n{hypothesis}\n\n##Feedback\n{feedback}"
                    + '\n\n##Example\n{"hypothesis": "From this input, it may be said that the woman was lonely."}'
                )
                hypothesis = self.call_llm.call_llms(sys_prompt, input_t)
                try:
                    hypothesis = json.loads(hypothesis)
                    hypothesis = hypothesis["hypothesis"]
                    break
                except Exception:
                    print("[INFO] 2: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
            vote_prompt = f'''The User's input is a hypothesis about the Proposition. Please output your approval or disapproval of the hypothesis, together with feedback, referring to the Examples.
Please output in JSON format.
##Proposition
{input_t}
##Examples
{{"vote": "agree", "feedback": "The hypothesis is appropriate and consistent with the situation."}}
{{"vote": "disagree", "feedback": "That hypothesis misses the point. The likelihood of that phenomenon occurring in general is infinitesimally small, so it can be ignored."}}'''
            # The three council members who did not propose the hypothesis vote on it.
            persona_prompts = {1: angel_prompt, 2: devil_prompt, 3: hardboiled_prompt, 4: emotional_prompt}
            reviewer_prompts = [p for member, p in persona_prompts.items() if member != random_llm]
            responses = []
            for i, reviewer_prompt in enumerate(reviewer_prompts, start=1):
                while True:  # Retry until the vote parses as JSON
                    response = self.call_llm.call_llms(reviewer_prompt + "\n\n" + vote_prompt, hypothesis)
                    try:
                        response = json.loads(response)
                        break
                    except Exception:
                        print(f"[INFO] 2-{i}: The response from OpenAI API didn't follow the specified format, so it is re-running now.")
                responses.append(response)
            vote_count = [r["vote"] for r in responses]
            feedback_count = [r["feedback"] for r in responses]
            agree_count = vote_count.count("agree")
            disagree_count = vote_count.count("disagree")
            print("Agree/Disagree: ", agree_count, "/", disagree_count)
            if agree_count >= 2:  # Majority approval ends the feedback loop
                print("[Resolution] Approval")
                break
            else:
                print("[Resolution] Rejection")
        return hypothesis
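

# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes that call_llm.CallLLM.call_llms(system_prompt, user_input) returns the
# model's reply as a string, which matches how it is called throughout this file;
# the example input below is hypothetical.
if __name__ == "__main__":
    maker = MakeHypothesis()
    user_input = "The cafe that is usually crowded was empty this morning."  # hypothetical input
    tool = maker.making_thinking_tool(user_input)  # choose a thinking tool for this situation
    print("Selected tool:", tool)
    hypothesis = maker.making_hypothesis(user_input, tool)  # propose, vote on, and refine a hypothesis
    print("Approved hypothesis:", hypothesis)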