Skip to content

Commit b01b3ba

Browse files
authored
Merge pull request #47 from streamed-chat
Streamed response
2 parents f1291a5 + d6ffafa commit b01b3ba

File tree

12 files changed

+526
-51
lines changed

12 files changed

+526
-51
lines changed

example/client/chat.py

Lines changed: 66 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,11 @@
11
import os
22
import time
3-
3+
from typing import ContextManager, Iterator
44
from memu import MemuClient
5+
from memu.sdk.python.models import ChatResponse, ChatResponseStream
56

67

7-
def print_chat_response(response, message_num: int):
8+
def print_chat_response(response: ChatResponse, message_num: int):
89
"""Print chat response with detailed token usage."""
910
print(f"\n🤖 Chat Response #{message_num}:")
1011
print(f" {response.message}")
@@ -24,6 +25,40 @@ def print_chat_response(response, message_num: int):
2425
print(f" - Retrieved Memory: {breakdown.retrieved_memory}")
2526

2627

28+
def print_chat_response_stream(response: ContextManager[Iterator[ChatResponseStream]], message_num: int):
29+
print(f"\n🤖 Chat Response #{message_num} (Stream):")
30+
print(" 💬", end="", flush=True)
31+
32+
chat_token_usage = None
33+
34+
# The context manager form is safer because it guarantees .close() is called in the finally block
35+
with response as response_iterator:
36+
for chunk in response_iterator:
37+
if chunk.error:
38+
print(f" ❌ Error: {chunk.error}")
39+
break
40+
if chunk.message:
41+
print(f"{chunk.message}", end="", flush=True)
42+
if chunk.chat_token_usage:
43+
chat_token_usage = chunk.chat_token_usage
44+
if chunk.stream_ended:
45+
print()
46+
47+
if chat_token_usage:
48+
print("\n📊 Token Usage:")
49+
print(f" Total Tokens: {chat_token_usage.total_tokens}")
50+
print(f" Prompt Tokens: {chat_token_usage.prompt_tokens}")
51+
print(f" Completion Tokens: {chat_token_usage.completion_tokens}")
52+
53+
if chat_token_usage.prompt_tokens_breakdown:
54+
breakdown = chat_token_usage.prompt_tokens_breakdown
55+
print(" 📈 Token Breakdown:")
56+
print(f" - Current Query: {breakdown.current_query}")
57+
print(f" - Short Term Context: {breakdown.short_term_context}")
58+
print(f" - User Profile: {breakdown.user_profile}")
59+
print(f" - Retrieved Memory: {breakdown.retrieved_memory}")
60+
61+
2762
def main():
2863
"""Main chat demonstration function."""
2964
print("🚀 MemU Chat API Demo")
@@ -73,7 +108,7 @@ def main():
73108
]
74109

75110
# Conduct the chat session
76-
for i, example in enumerate(chat_examples, 1):
111+
for i, example in enumerate(chat_examples[:3], 1):
77112
print(f"\n👤 User Message #{i}: {example['message']}")
78113
print(f" Context: {example['description']}")
79114
print(f" LLM Parameters: {example['kwargs']}")
@@ -99,6 +134,34 @@ def main():
99134
# Small delay between messages
100135
time.sleep(1)
101136

137+
# Conduct the chat session with stream
138+
for i, example in enumerate(chat_examples[3:], 4):
139+
print(f"\n👤 User Message #{i}: {example['message']}")
140+
print(f" Context: {example['description']}")
141+
print(f" LLM Parameters: {example['kwargs']}")
142+
143+
try:
144+
# Send chat message
145+
response = memu_client.chat(
146+
user_id=user_id,
147+
user_name=user_name,
148+
agent_id=agent_id,
149+
agent_name=agent_name,
150+
message=example['message'],
151+
max_context_tokens=4000,
152+
**example['kwargs'],
153+
stream=True,
154+
)
155+
156+
# Print detailed response
157+
print_chat_response_stream(response, i)
158+
159+
except Exception as e:
160+
print(f" ❌ Chat error: {e}")
161+
162+
# Small delay between messages
163+
time.sleep(1)
164+
102165
# Close the client
103166
memu_client.close()
104167

memu/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
Simplified unified memory architecture with a single Memory Agent.
77
"""
88

9-
__version__ = "0.2.1"
9+
__version__ = "0.2.2"
1010
__author__ = "MemU Team"
1111
__email__ = "[email protected]"
1212

memu/sdk/javascript/examples/basic-usage.js

Lines changed: 0 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -145,39 +145,6 @@ export const basicExample = async () => {
145145
})
146146
})
147147

148-
console.log()
149-
150-
// Example 7: Chat with memory-enhanced conversation
151-
console.log('💬 Starting memory-enhanced chat...')
152-
const chatResponse = await client.chat({
153-
agentId: 'assistant',
154-
agentName: 'Assistant',
155-
kwargs: {
156-
temperature: 0.7,
157-
maxTokens: 150,
158-
},
159-
message: 'What should I prepare for my next hiking trip?',
160-
model: 'gpt-4o-mini', // Specify the chat model
161-
system: 'You are a helpful hiking assistant with expertise in outdoor activities and safety.',
162-
userId: 'user',
163-
userName: 'Johnson',
164-
})
165-
166-
console.log(`🤖 AI Response: ${chatResponse.message}`)
167-
console.log('📊 Token Usage:')
168-
console.log(` Total Tokens: ${chatResponse.chatTokenUsage.totalTokens}`)
169-
console.log(` Prompt Tokens: ${chatResponse.chatTokenUsage.promptTokens}`)
170-
console.log(` Completion Tokens: ${chatResponse.chatTokenUsage.completionTokens}`)
171-
172-
if (chatResponse.chatTokenUsage.promptTokensBreakdown) {
173-
const breakdown = chatResponse.chatTokenUsage.promptTokensBreakdown
174-
console.log(' Token Breakdown:')
175-
console.log(` - Current Query: ${breakdown.currentQuery || 0}`)
176-
console.log(` - Short Term Context: ${breakdown.shortTermContext || 0}`)
177-
console.log(` - User Profile: ${breakdown.userProfile || 0}`)
178-
console.log(` - Retrieved Memory: ${breakdown.retrievedMemory || 0}`)
179-
}
180-
181148
console.log('\n✨ Example completed successfully!')
182149
}
183150
catch (error) {
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
/**
2+
* Example usage of MemU SDK with streaming support (JavaScript)
3+
*/
4+
5+
import { MemuClient } from '../dist/index.js'
6+
import { fileURLToPath } from 'url'
7+
import { dirname } from 'path'
8+
9+
const __filename = fileURLToPath(import.meta.url)
10+
const __dirname = dirname(__filename)
11+
12+
// Initialize the client
13+
const client = new MemuClient({
14+
apiKey: 'your-api-key-here',
15+
baseUrl: 'https://api.memu.so',
16+
})
17+
18+
async function nonStreamingExample() {
19+
console.log('=== Non-streaming Chat Example ===')
20+
21+
try {
22+
const request = {
23+
userId: 'user123',
24+
userName: 'John Doe',
25+
agentId: 'agent456',
26+
agentName: 'AI Assistant',
27+
message: 'Hello, how are you today?',
28+
system: 'You are a helpful assistant.',
29+
model: 'gpt-4.1',
30+
stream: false, // Explicit non-streaming
31+
}
32+
33+
const response = await client.chat(request)
34+
console.log('Response:', response.message)
35+
console.log('Token usage:', response.chatTokenUsage)
36+
} catch (error) {
37+
console.error('Error:', error)
38+
}
39+
}
40+
41+
async function streamingExample() {
42+
console.log('\n=== Streaming Chat Example ===')
43+
44+
try {
45+
const request = {
46+
userId: 'user123',
47+
userName: 'John Doe',
48+
agentId: 'agent456',
49+
agentName: 'AI Assistant',
50+
message: 'Tell me a story about a brave knight.',
51+
system: 'You are a creative storyteller.',
52+
model: 'gpt-4.1',
53+
stream: true, // Enable streaming
54+
}
55+
56+
const streamResponse = await client.chat(request)
57+
58+
let fullMessage = ''
59+
60+
// Cast to AsyncGenerator for streaming response
61+
for await (const chunk of streamResponse) {
62+
if (chunk.error) {
63+
console.error('Stream error:', chunk.error)
64+
break
65+
}
66+
67+
if (chunk.message) {
68+
process.stdout.write(chunk.message)
69+
fullMessage += chunk.message
70+
}
71+
72+
if (chunk.streamEnded) {
73+
console.log('\n\nStream ended.')
74+
if (chunk.chatTokenUsage) {
75+
console.log('Final token usage:', chunk.chatTokenUsage)
76+
}
77+
break
78+
}
79+
}
80+
81+
console.log(`\nFull message received: ${fullMessage.length} characters`)
82+
} catch (error) {
83+
console.error('Stream error:', error)
84+
}
85+
}
86+
87+
async function main() {
88+
// Run non-streaming example
89+
await nonStreamingExample()
90+
91+
// Wait a bit
92+
await new Promise(resolve => setTimeout(resolve, 1000))
93+
94+
// Run streaming example
95+
await streamingExample()
96+
}
97+
98+
// Run the examples if this file is executed directly
99+
if (import.meta.url === `file://${process.argv[1]}`) {
100+
main().catch(console.error)
101+
}
102+
103+
export { nonStreamingExample, streamingExample }

memu/sdk/javascript/examples/typescript-usage.ts

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77

88
import type {
99
ChatResponse,
10+
ChatResponseStream,
1011
DefaultCategoriesResponse,
1112
MemorizeResponse,
1213
MemorizeTaskStatusResponse,
@@ -231,6 +232,52 @@ export const typescriptExample = async (): Promise<void> => {
231232
}
232233

233234
console.log('\n✨ TypeScript example completed successfully!')
235+
236+
// Example 7: Streaming chat example
237+
console.log('\n🌊 Starting streaming chat example...')
238+
const streamResponse = await client.chat({
239+
agentId: 'ml_tutor',
240+
agentName: 'ML Tutor',
241+
kwargs: {
242+
maxTokens: 300,
243+
temperature: 0.7,
244+
},
245+
message: 'Tell me a detailed explanation about neural networks and how they work.',
246+
model: 'gpt-3.5-turbo',
247+
stream: true, // Enable streaming
248+
system: 'You are an expert machine learning tutor. Provide detailed explanations.',
249+
userId: 'student_456',
250+
userName: 'Bob Smith',
251+
}) as AsyncGenerator<ChatResponseStream, void, unknown>
252+
253+
console.log('🤖 Streaming AI Response:')
254+
let fullStreamMessage = ''
255+
256+
for await (const chunk of streamResponse) {
257+
if (chunk.error) {
258+
console.error(`❌ Stream error: ${chunk.error}`)
259+
break
260+
}
261+
262+
if (chunk.message) {
263+
process.stdout.write(chunk.message)
264+
fullStreamMessage += chunk.message
265+
}
266+
267+
if (chunk.streamEnded) {
268+
console.log('\n\n🏁 Stream ended.')
269+
if (chunk.chatTokenUsage) {
270+
console.log('📊 Final Token Usage:')
271+
console.log(` Total Tokens: ${chunk.chatTokenUsage.totalTokens}`)
272+
console.log(` Prompt Tokens: ${chunk.chatTokenUsage.promptTokens}`)
273+
console.log(` Completion Tokens: ${chunk.chatTokenUsage.completionTokens}`)
274+
}
275+
break
276+
}
277+
}
278+
279+
console.log(`✅ Full streamed message received: ${fullStreamMessage.length} characters`)
280+
console.log('\n✨ TypeScript example with streaming completed successfully!')
234281
}
235282
catch (error) {
236283
console.error('❌ Error occurred:')

memu/sdk/javascript/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
{
22
"name": "memu-js",
33
"type": "module",
4-
"version": "0.2.1",
4+
"version": "0.2.2",
55
"packageManager": "[email protected]",
66
"description": "MemU JavaScript SDK for interacting with MemU API services",
77
"author": "MemU Team",

0 commit comments

Comments
 (0)