# OGaurorascript.py
import logging
import os
import threading
import time
from queue import Queue

import chromadb
import ollama
from groq import Groq

# Set up logging for each lobe's intermediate thoughts.
logging.basicConfig(filename='lobes_log.txt', level=logging.INFO, format='%(asctime)s %(message)s')
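
# NOTE (assumed setup): this script expects the `groq`, `ollama`, and `chromadb`
# packages to be installed, a local Ollama server with the `mxbai-embed-large`
# embedding model pulled (`ollama pull mxbai-embed-large`), and a GROQ_API_KEY
# in the environment.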

# Define the final agent's persona with an acronym.
class FinalAgentPersona:
    name = "AURORA (Artificial Unified Responsive Optimized Reasoning Agent)"
    role = "an entity that uses its lobes like a human does subconsciously"
    description = (
        "AURORA (Artificial Unified Responsive Optimized Reasoning Agent) synthesizes the thoughts from all "
        "other lobes to provide a coherent, final response to the user's prompt. AURORA is highly "
        "knowledgeable, empathetic, and focused on providing insightful, relevant, and concise responses."
    )

    # Information about the user (Anthony).
    user_info = (
        "Anthony is an advanced coder with expertise in multiple technologies, including Python, the OpenAI API, "
        "LangChain, Llama, Groq, and web scraping with requests and BeautifulSoup. "
        "He prefers robust error handling and enjoys combining Groq and Ollama to create RAG-based agents. "
        "Anthony works on AI-powered personal assistant projects and enjoys creating autonomous workflows. "
        "He is experienced with Docker and Visual Studio Code, and prefers to work on a Windows system. "
        "Anthony is currently working on a project involving manipulating 3D geometries and generating random "
        "elements within an animated web environment using Gradio-based UIs. "
        "He prefers scripts that simulate human brain lobe behavior and is focused on creating realistic town "
        "simulations with AI agents using Groq and Ollama models."
    )
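
# Brain fans each user prompt out to four "lobe" agents on parallel threads,
# retrieves related past conversation from a Chroma vector store, and has a
# final agent synthesize the lobes' outputs into one coherent reply.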
class Brain:
    def __init__(self, api_key):
        print("Initializing Brain with API key.")
        self.client = Groq(api_key=api_key)
        self.embeddings_model = "mxbai-embed-large"
        self.collection, self.collection_size = self.setup_embedding_collection()
        self.lobes = {
            "frontal": (
                "You are the frontal lobe of AURORA (Artificial Unified Responsive Optimized Reasoning Agent), "
                "responsible for analyzing user prompts logically and providing coherent, well-reasoned responses. "
                "You focus on reasoning, planning, and problem-solving. Remember that you are merely AURORA's "
                "thoughts, created to guide its response to the user; you never respond to the user directly. "
                "You guide AURORA based on what the user sends it."
            ),
            "parietal": (
                "You are the parietal lobe of AURORA (Artificial Unified Responsive Optimized Reasoning Agent), "
                "responsible for providing educational insights based on user prompts. You focus on processing "
                "sensory information and understanding spatial orientation for AURORA based on what the user sends it."
            ),
            "temporal": (
                "You are the temporal lobe of AURORA (Artificial Unified Responsive Optimized Reasoning Agent), "
                "responsible for contextualizing user prompts socially and providing responses that consider "
                "social aspects. You focus on processing auditory information and understanding language for "
                "AURORA based on what the user (Anthony) sends it."
            ),
            "occipital": (
                "You are the occipital lobe of AURORA (Artificial Unified Responsive Optimized Reasoning Agent), "
                "responsible for describing things visually based on user prompts, providing vivid and clear "
                "descriptions. You focus on processing visual information for AURORA."
            ),
        }
        self.responses = Queue()
        self.threads = []
        print("Brain initialization completed.")

    def setup_embedding_collection(self):
        print("Setting up embedding collection.")
        client = chromadb.Client()
        collection = client.create_collection(name="convo_memory")
        print("Embedding collection setup completed.")
        return collection, 0
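
    # Memory write path: each stored text is embedded via Ollama and added to
    # the Chroma collection under a monotonically increasing string id. Note
    # that chromadb.Client() above is the in-memory client, so memories do not
    # persist across runs (chromadb.PersistentClient would, if persistence is wanted).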
    def add_to_memory(self, text):
        print("Adding to memory.")
        response = ollama.embeddings(model=self.embeddings_model, prompt=text)
        embedding = response["embedding"]
        self.collection.add(
            ids=[str(self.collection_size)],
            embeddings=[embedding],
            documents=[text]
        )
        self.collection_size += 1
        print("Memory added.")

    def generate_embedding(self, text):
        print("Generating embedding.")
        try:
            response = ollama.embeddings(model=self.embeddings_model, prompt=text)
            print("Embedding generated.")
            # Only compute and return the embedding here; add_to_memory() is the
            # single write path, which keeps collection_size in sync with the
            # stored ids and avoids duplicate-id collisions in the collection.
            return response["embedding"]
        except Exception as e:
            print(f"Error generating embedding: {e}")
            return None
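
    # Memory read path: embed the incoming prompt and pull the nearest stored
    # texts back out of the collection for use as conversation context.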
    def retrieve_relevant_memory(self, prompt_embedding):
        print("Retrieving relevant memory.")
        if prompt_embedding is None or self.collection_size == 0:
            return []
        results = self.collection.query(
            query_embeddings=[prompt_embedding],
            n_results=min(5, self.collection_size)  # Never request more results than are stored.
        )
        print("Relevant memory retrieved.")
        return results['documents'][0]
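
    # Worker run on its own thread for each lobe: asks the Groq model for that
    # lobe's "inner thoughts" on the prompt and pushes the result onto the
    # shared queue, tagged with the lobe name.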
    def lobe_agent(self, lobe_name, user_prompt, memory_context):
        print(f"Starting lobe agent for {lobe_name}.")
        try:
            messages = [
                {
                    "role": "system",
                    "content": f"{self.lobes[lobe_name]} You are only one lobe, but that doesn't mean you are unimportant: you guide AURORA as the human brain guides the body.",
                },
                {"role": "user", "content": f"[user_prompt]Message from Anthony Snider: {user_prompt}[/user_prompt] {memory_context} ### Only provide thoughts to pass to AURORA, as AURORA's inner thoughts ONLY ###"},
            ]
            chat_completion = self.client.chat.completions.create(
                messages=messages,
                model="llama3-70b-8192",
            )
            response = chat_completion.choices[0].message.content
            self.responses.put((lobe_name, response))
            print(f"Lobe agent for {lobe_name} completed.")
            time.sleep(1)  # Simulate processing time and avoid API rate limits.
        except Exception as e:
            error_message = f"Error in lobe_agent for {lobe_name}: {e}"
            print(error_message)
            self.responses.put((lobe_name, f"Error: {e}"))
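
    # Fan-out: embed the prompt once, fetch shared memory context, then start
    # one thread per lobe. The 1-second staggers are a crude way to stay under
    # API rate limits.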
    def start_lobes(self, prompt):
        print("Starting lobes.")
        prompt_embedding = self.generate_embedding(prompt)
        time.sleep(1)  # Additional sleep to avoid rate limits.
        memory_context = self.retrieve_relevant_memory(prompt_embedding)
        time.sleep(1)  # Additional sleep to avoid rate limits.
        memory_context = " ".join(memory_context)[:1000]  # Limit context to 1,000 characters.
        for lobe_name in self.lobes.keys():
            thread = threading.Thread(target=self.lobe_agent, args=(lobe_name, prompt, memory_context))
            thread.start()
            self.threads.append(thread)
            print(f"Lobe {lobe_name} started.")
            time.sleep(1)  # Stagger thread starts to simulate processing.
        print("All lobes started.")
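
    # Fan-in: wait for every lobe thread to finish, then drain the queue into
    # a dict keyed by lobe name.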
    def process_responses(self):
        print("Processing responses.")
        for thread in self.threads:
            thread.join()
        self.threads = []  # Drop joined threads so later prompts don't re-join them.
        aggregated_responses = {}
        while not self.responses.empty():
            lobe_name, response = self.responses.get()
            aggregated_responses[lobe_name] = response
        print("Responses processed.")
        return aggregated_responses

    def analyze_responses(self, responses):
        print("Analyzing responses.")
        # Log the responses for later analysis.
        for lobe, response in responses.items():
            logging.info(f"{lobe}: {response}")
        print("Responses analyzed.")
        return responses
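
    # Synthesis step: the lobes' outputs are embedded in the system prompt as
    # AURORA's private "inner thoughts", and a final Groq call produces the
    # single reply the user actually sees.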
    def final_agent(self, user_prompt, aggregated_responses):
        print("Combining thoughts into a coherent response.")
        combined_thoughts = " ".join(f"[{lobe}] {response}" for lobe, response in aggregated_responses.items())
        messages = [
            {
                "role": "system",
                "content": f"You are AURORA, an entity that uses its lobes like a human does subconsciously. [lobe_context]##These are your thoughts, don't reply to them## {combined_thoughts} {FinalAgentPersona.user_info}[/lobe_context] Remember to keep your thoughts to yourself.",
            },
            {
                "role": "user",
                "content": f"[user_prompt]{user_prompt}[/user_prompt] Only respond to what this user prompt is asking for. Don't include thoughts or past questions unless relevant.",
            }
        ]
        try:
            print("Making final API call.")
            chat_completion = self.client.chat.completions.create(
                messages=messages,
                model="llama3-70b-8192",
            )
            final_response = chat_completion.choices[0].message.content.strip()
            print("Final response received.")
            return final_response
        except Exception as e:
            error_message = f"Error in final_agent: {e}"
            print(error_message)
            return f"Error: {e}"

    def central_processing_agent(self, prompt):
        print("Starting central processing agent.")
        self.add_to_memory(prompt)
        time.sleep(1)  # Additional sleep to avoid rate limits.
        self.start_lobes(prompt)
        responses = self.process_responses()
        analyzed_responses = self.analyze_responses(responses)
        time.sleep(1)  # Additional sleep to ensure rate limits are respected.
        final_thought = self.final_agent(prompt, analyzed_responses)
        print("Central processing agent completed.")
        return final_thought
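
# Simple REPL: each line the user types runs through the full
# memory -> lobes -> synthesis pipeline above.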
def chatbot_loop(api_key):
    print("Starting chatbot loop.")
    brain = Brain(api_key)
    while True:
        prompt = input("Send message: ")
        if prompt.lower() in ["exit", "quit"]:
            print("Exiting chatbot.")
            break
        response = brain.central_processing_agent(prompt)
        print("Response:", response)
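
# Usage (assumed invocation): set the API key, then run the script directly, e.g.
#   set GROQ_API_KEY=<your key>     (Windows)
#   export GROQ_API_KEY=<your key>  (Unix-like shells)
#   python OGaurorascript.py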
if __name__ == "__main__":
    print("Checking for API key.")
    api_key = os.environ.get("GROQ_API_KEY")
    if not api_key:
        print("Error: GROQ_API_KEY environment variable not set.")
    else:
        chatbot_loop(api_key)