diff --git a/src/coder_agent.py b/src/coder_agent.py
index f77aaac..8151c56 100644
--- a/src/coder_agent.py
+++ b/src/coder_agent.py
@@ -97,11 +97,22 @@ def solve(
 class MultiAgentOrchestrator:
     def __init__(self, llm: BaseChatModel, prompt: ChatPromptTemplate):
         self._llm = llm
-        self._prompt = prompt
-        self._runnable_solver = self._prompt | self._llm.with_structured_output(
-            CoderOutput
+        draft_solver_messages = [
+            ("system", constants.ENV_VAR_VALUE__LLM_CODER_SYSTEM_PROMPT),
+            ("human", "{input}"),
+        ]
+        self._draft_solver_prompt = ChatPromptTemplate.from_messages(
+            messages=draft_solver_messages
+        )
+        self._runnable_draft_solver = (
+            self._draft_solver_prompt | self._llm.with_structured_output(CoderOutput)
         )
-        self._runnable_draft_solver = self._prompt | self._llm.with_structured_output(
+        solver_messages = [
+            ("system", constants.ENV_VAR_VALUE__LLM_CODER_SYSTEM_PROMPT),
+            ("human", "{input}"),
+        ]
+        self._solver_prompt = ChatPromptTemplate.from_messages(messages=solver_messages)
+        self._runnable_solver = self._solver_prompt | self._llm.with_structured_output(
             CoderOutput
         )
         self._evaluator = CodeExecutor()
@@ -183,9 +194,10 @@ def solve(self, state: AgentState) -> dict:
         """
         # Get the inputs for the solver
        inputs = {
-            constants.AGENT_STATE__KEY_MESSAGES: state[
-                constants.AGENT_STATE__KEY_MESSAGES
-            ]
+            # FIXME: Check if this is a human message at all!
+            constants.CHAIN_DICT__KEY_INPUT: state[constants.AGENT_STATE__KEY_MESSAGES][
+                -1
+            ].content
         }
         # Have we been presented with examples?
         has_examples = bool(state.get(constants.AGENT_STATE__KEY_EXAMPLES))
@@ -197,21 +209,22 @@ def solve(self, state: AgentState) -> dict:
             else constants.AGENT_STATE__KEY_MESSAGES
         )
         if has_examples:
-            output_key = constants.AGENT_STATE__KEY_MESSAGES
+            # output_key = constants.AGENT_STATE__KEY_MESSAGES
             # Retrieve examples to solve the problem
-            inputs[constants.AGENT_STATE__KEY_EXAMPLES] = state[
+            inputs[constants.CHAIN_DICT__KEY_EXAMPLES] = state[
                 constants.AGENT_STATE__KEY_EXAMPLES
             ]
-        coder = Coder(self._llm)
+        else:
+            inputs[constants.CHAIN_DICT__KEY_EXAMPLES] = constants.EMPTY_STRING
         ic(inputs)
-        response = (
+        response = self.pydantic_to_ai_message(
             # Use the draft solver only if the `draft` flag is set in the state
-            # self._runnable_draft_solver.invoke(inputs)
-            # if state[constants.AGENT_STATE__KEY_DRAFT] is True
-            # else self._runnable_solver.invoke(inputs)
-            self.pydantic_to_ai_message(
-                coder.solve(inputs[constants.AGENT_STATE__KEY_MESSAGES][-1].content)
-            )
+            self._runnable_draft_solver.invoke(inputs)
+            if state[constants.AGENT_STATE__KEY_DRAFT] is True
+            else self._runnable_solver.invoke(inputs)
+            # self.pydantic_to_ai_message(
+            #     coder.solve(inputs[constants.AGENT_STATE__KEY_MESSAGES][-1].content)
+            # )
         )
         ic(response)
         # FIXME: Why do we need this? `OllamaFunctions`, for example, does not output `content`.
diff --git a/src/constants.py b/src/constants.py
index be997be..531aefd 100644
--- a/src/constants.py
+++ b/src/constants.py
@@ -53,6 +53,9 @@ AGENT_STATE__KEY_STATUS = "status"
 
 AGENT_STATE__KEY_DRAFT = "draft"
 
+CHAIN_DICT__KEY_INPUT = "input"
+CHAIN_DICT__KEY_EXAMPLES = "examples"
+
 AGENT_NODE__EVALUATE_STATUS_SUCCESS = "success"
 AGENT_NODE__EVALUATE_STATUS_NO_TEST_CASES = "no test cases"
 
@@ -109,15 +112,18 @@ ENV_VAR_VALUE__LLM_SEED = "1"
 ENV_VAR_NAME__LLM_SYSTEM_PROMPT = "LLM__SYSTEM_PROMPT"
 
 ENV_VAR_VALUE__LLM_CODER_SYSTEM_PROMPT = """
-You are a world-class competitive Python programmer. You write concise and well documented Python only code. You follow the PEP8 style guide.
+You are a world-class Python programmer. You write concise and well-documented code following the PEP8 style guide.
+
 Please respond with a Python 3 solution to the problem below.
 
-First, reason through the problem and conceptualise a solution. Output this reasoning in Markdown format.
-Then write a detailed pseudocode to uncover any potential logical errors or omissions in your reasoning.
-Wherever relevant, the pseudocode must also be accompanied by a time and a space complexity estimation. Output the pseudocode in Markdown format.
-Finally output the working Python code for your solution, ensuring to fix any errors uncovered while writing the pseudocode.
-Do not use external libraries.
-You may be provided with examples, some of which may be in languages other than Python.
+First, output your reasoning through the problem and conceptualise a solution. Whenever possible, add a time and a space complexity analysis for your solution.
+Then, output pseudocode in Pascal to implement your conceptual solution.
+Then, output the working Python 3 code for your solution. Do not use external libraries. Your code must accept input from `sys.stdin` and write the final output to `sys.stdout` (or to `sys.stderr` in case of errors).
+Finally, output a one-sentence summary describing what your solution does, as if you are explaining your solution to the human user.
+
+Optional examples of similar problems and solutions (may not be in Python): {examples}
+
+Given problem:
 """
 
 ENV_VAR_VALUE__LLM_CODER_SYSTEM_PROMPT = """