
Commit d51cd0b

Add files via upload
1 parent 2f0981b commit d51cd0b

File tree

3 files changed: +118 -0 lines changed

mcp/testclient.py

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
import asyncio

from agents import Agent, Runner, gen_trace_id, trace
from agents.mcp import MCPServer, MCPServerSse
from agents.model_settings import ModelSettings

# load dotenv
from dotenv import load_dotenv
load_dotenv()

async def run(mcp_server: MCPServer):
    agent = Agent(
        name="Assistant",
        instructions="Use the tools to answer the questions.",
        mcp_servers=[mcp_server],
        model_settings=ModelSettings(tool_choice="required"),
        model="gpt-4o",
    )

    # Test tool call
    while True:
        # Get user input
        message = input("User>> ")
        if message == "exit":
            return "bye"
        else:
            #print(f"Running: {message}")
            result = await Runner.run(starting_agent=agent, input=message)
            print(f"gpt4o>> {result.final_output}")

async def main():
    async with MCPServerSse(
        name="wttr.in python mcp sse server",
        params={
            "url": "http://10.0.0.4:7860/sse",
        },
        cache_tools_list=True,
    ) as server:
        trace_id = gen_trace_id()
        with trace(workflow_name="wttr.in sse Trace", trace_id=trace_id):
            print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n")
            await run(server)

if __name__ == "__main__":
    asyncio.run(main())
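
Note: the client calls load_dotenv() but never checks that a key was actually loaded. A minimal guard sketch follows, assuming the Agents SDK picks up the standard OPENAI_API_KEY variable; that variable name is an assumption, not something this commit verifies.

import os

from dotenv import load_dotenv

load_dotenv()
# Fail fast with a clear message instead of an opaque auth error at the first Runner.run call.
if not os.environ.get("OPENAI_API_KEY"):
    raise SystemExit("OPENAI_API_KEY is not set; add it to .env next to testclient.py")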

mcp/testserver.py

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
from mcp.server.fastmcp import FastMCP

# Create an MCP server instance named "hello"
mcp = FastMCP("hello", host="10.0.0.4", port=7860)

# Define a tool called "hello" that returns a simple greeting
@mcp.tool()
def hello() -> str:
    """Returns a simple greeting."""
    return "Hello, World!"

if __name__ == "__main__":
    # Start the MCP server over SSE so the clients can connect at http://10.0.0.4:7860/sse
    print("Starting MCP server...")
    mcp.run(transport="sse")
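
The committed server exposes only the hello tool, while both clients are named after wttr.in. A minimal sketch of how a weather tool could be added to the same FastMCP server is shown below; the tool name get_weather, the use of urllib, and the ?format=3 query are illustrative assumptions, not part of this commit.

from urllib.parse import quote
from urllib.request import urlopen

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("wttr", host="10.0.0.4", port=7860)

@mcp.tool()
def get_weather(city: str) -> str:
    """Return a one-line weather summary for the given city from wttr.in."""
    # wttr.in's format=3 endpoint returns a compact summary such as "London: +7°C".
    with urlopen(f"https://wttr.in/{quote(city)}?format=3", timeout=10) as resp:
        return resp.read().decode("utf-8").strip()

if __name__ == "__main__":
    mcp.run(transport="sse")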

mcp/wttr_chat_client.py

Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
import asyncio

from agents import Agent, Runner, gen_trace_id, trace
from agents.mcp import MCPServer, MCPServerSse
from agents.model_settings import ModelSettings

# load dotenv
from dotenv import load_dotenv
load_dotenv()

async def run(mcp_server: MCPServer):
    travel_expert_agent = Agent(
        name="Travel Expert Assistant",
        instructions="You are a travel expert.",
        model="gpt-4o",
    )

    wttr_agent = Agent(
        name="wttr Assistant",
        instructions="Use the tools to answer the questions.",
        mcp_servers=[mcp_server],
        model_settings=ModelSettings(tool_choice="required"),
        model="gpt-4o",
    )

    triage_agent = Agent(
        name="Triage Agent",
        instructions="You determine which agent to use based on the user's input.",
        handoffs=[wttr_agent, travel_expert_agent],
    )
    # Test tool call
    while True:
        # Get user input
        message = input("User>> ")
        if message == "exit":
            return "bye"
        else:
            result = await Runner.run(starting_agent=triage_agent, input=message)
            print(f"gpt4o>> {result.final_output}")

async def main():
    async with MCPServerSse(
        name="wttr python mcp sse server",
        params={
            "url": "http://10.0.0.4:7860/sse",
        },
        cache_tools_list=True,
    ) as server:
        trace_id = gen_trace_id()
        with trace(workflow_name="wttr sse Trace", trace_id=trace_id):
            print(f"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n")
            await run(server)

if __name__ == "__main__":
    asyncio.run(main())
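
For a quick non-interactive check of the same stack (for example from a script), the input() loop can be replaced with a single scripted question. The sketch below only reuses the APIs already imported in the files above; the question text is illustrative.

import asyncio

from agents import Agent, Runner
from agents.mcp import MCPServerSse
from agents.model_settings import ModelSettings
from dotenv import load_dotenv

load_dotenv()

async def main() -> None:
    async with MCPServerSse(
        name="wttr python mcp sse server",
        params={"url": "http://10.0.0.4:7860/sse"},
        cache_tools_list=True,
    ) as server:
        agent = Agent(
            name="wttr Assistant",
            instructions="Use the tools to answer the questions.",
            mcp_servers=[server],
            model_settings=ModelSettings(tool_choice="required"),
            model="gpt-4o",
        )
        # One scripted turn instead of an interactive loop.
        result = await Runner.run(starting_agent=agent, input="What's the weather in London?")
        print(result.final_output)

if __name__ == "__main__":
    asyncio.run(main())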
