Skip to content

Commit d6e6c8d

Browse files
Authored by: danielchalef, Brice Macias (bricemacias)
feat: Configurable Summarizer Prompt (#211)
* Custom summary prompt via env functionality
* Use config instead of env
* Light restructuring; add tests

Co-authored-by: Brice Macias <[email protected]>
Co-authored-by: Leo <[email protected]>
1 parent ad59ade commit d6e6c8d

File tree

5 files changed

+212
-23
lines changed

5 files changed

+212
-23
lines changed

config.yaml

+39
Original file line numberDiff line numberDiff line change
@@ -70,3 +70,42 @@ data:
7070
purge_every: 60
7171
log:
7272
level: "info"
73+
# Custom Prompts Configuration
74+
# Allows customization of extractor prompts.
75+
custom_prompts:
76+
summarizer_prompts:
77+
# Anthropic Guidelines:
78+
# - Use XML-style tags like <current_summary> as element identifiers.
79+
# - Include {{.PrevSummary}} and {{.MessagesJoined}} as template variables.
80+
# - Clearly explain model instructions, e.g., "Review content inside <current_summary></current_summary> tags".
81+
# - Provide a clear example within the prompt.
82+
#
83+
# Example format:
84+
# anthropic: |
85+
# <YOUR INSTRUCTIONS HERE>
86+
# <example>
87+
# <PROVIDE AN EXAMPLE>
88+
# </example>
89+
# <current_summary>{{.PrevSummary}}</current_summary>
90+
# <new_lines>{{.MessagesJoined}}</new_lines>
91+
# Response without preamble.
92+
#
93+
# If left empty, the default Anthropic summary prompt from zep/pkg/extractors/prompts.go will be used.
94+
anthropic: |
95+
96+
# OpenAI summarizer prompt configuration.
97+
# Guidelines:
98+
# - Include {{.PrevSummary}} and {{.MessagesJoined}} as template variables.
99+
# - Provide a clear example within the prompt.
100+
#
101+
# Example format:
102+
# openai: |
103+
# <YOUR INSTRUCTIONS HERE>
104+
# Example:
105+
# <PROVIDE AN EXAMPLE>
106+
# Current summary: {{.PrevSummary}}
107+
# New lines of conversation: {{.MessagesJoined}}
108+
# New summary:
109+
#
110+
# If left empty, the default OpenAI summary prompt from zep/pkg/extractors/prompts.go will be used.
111+
openai: |

config/models.go

+20-10
Original file line numberDiff line numberDiff line change
@@ -3,16 +3,17 @@ package config
33
// Config holds the configuration of the application
44
// Use cmd.NewConfig to create a new instance
55
type Config struct {
6-
LLM LLM `mapstructure:"llm"`
7-
NLP NLP `mapstructure:"nlp"`
8-
Memory MemoryConfig `mapstructure:"memory"`
9-
Extractors ExtractorsConfig `mapstructure:"extractors"`
10-
Store StoreConfig `mapstructure:"store"`
11-
Server ServerConfig `mapstructure:"server"`
12-
Log LogConfig `mapstructure:"log"`
13-
Auth AuthConfig `mapstructure:"auth"`
14-
DataConfig DataConfig `mapstructure:"data"`
15-
Development bool `mapstructure:"development"`
6+
LLM LLM `mapstructure:"llm"`
7+
NLP NLP `mapstructure:"nlp"`
8+
Memory MemoryConfig `mapstructure:"memory"`
9+
Extractors ExtractorsConfig `mapstructure:"extractors"`
10+
Store StoreConfig `mapstructure:"store"`
11+
Server ServerConfig `mapstructure:"server"`
12+
Log LogConfig `mapstructure:"log"`
13+
Auth AuthConfig `mapstructure:"auth"`
14+
DataConfig DataConfig `mapstructure:"data"`
15+
Development bool `mapstructure:"development"`
16+
CustomPrompts CustomPromptsConfig `mapstructure:"custom_prompts"`
1617
}
1718

1819
type StoreConfig struct {
@@ -96,6 +97,15 @@ type SummarizerConfig struct {
9697
Enabled bool `mapstructure:"enabled"`
9798
}
9899

100+
// CustomPromptsConfig groups user-supplied prompt overrides for the
// built-in extractors.
type CustomPromptsConfig struct {
	SummarizerPrompts ExtractorPromptsConfig `mapstructure:"summarizer_prompts"`
}

// ExtractorPromptsConfig holds per-LLM-service prompt templates. An empty
// string means the default prompt from pkg/extractors/prompts.go is used.
type ExtractorPromptsConfig struct {
	OpenAI    string `mapstructure:"openai"`
	Anthropic string `mapstructure:"anthropic"`
}
108+
99109
type EmbeddingsConfig struct {
100110
Enabled bool `mapstructure:"enabled"`
101111
Dimensions int `mapstructure:"dimensions"`

pkg/extractors/prompts.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ type IntentPromptTemplateData struct {
1616
Input string
1717
}
1818

19-
const summaryPromptTemplateAnthropic = `
19+
const defaultSummaryPromptTemplateAnthropic = `
2020
Review the Current Summary inside <current_summary></current_summary> XML tags,
2121
and the New Lines of the provided conversation inside the <new_lines></new_lines> XML tags. Create a concise summary
2222
of the conversation, adding from the <new_lines> to the <current_summary>.
@@ -47,7 +47,7 @@ singer and lists the founding members as Jimmy Page, John Paul Jones, and John B
4747
Provide a response immediately without preamble.
4848
`
4949

50-
const summaryPromptTemplateOpenAI = `
50+
const defaultSummaryPromptTemplateOpenAI = `
5151
Review the Current Content, if there is one, and the New Lines of the provided conversation. Create a concise summary
5252
of the conversation, adding from the New Lines to the Current summary.
5353
If the New Lines are meaningless, return the Current Content.

pkg/extractors/summarizer.go

+47-11
Original file line numberDiff line numberDiff line change
@@ -250,6 +250,22 @@ func processOverLimitMessages(
250250
}, nil
251251
}
252252

253+
// validateSummarizerPrompt checks that a summarizer prompt template contains
// both template variables that the summarizer substitutes at render time:
// {{.PrevSummary}} and {{.MessagesJoined}}. It returns nil when both are
// present, otherwise an error naming the required identifiers.
func validateSummarizerPrompt(prompt string) error {
	const (
		prevSummaryIdentifier    = "{{.PrevSummary}}"
		messagesJoinedIdentifier = "{{.MessagesJoined}}"
	)

	// A prompt is usable only if both placeholders appear somewhere in it.
	if strings.Contains(prompt, prevSummaryIdentifier) &&
		strings.Contains(prompt, messagesJoinedIdentifier) {
		return nil
	}

	return fmt.Errorf(
		"wrong summary prompt format. please make sure it contains the identifiers %s and %s",
		prevSummaryIdentifier, messagesJoinedIdentifier,
	)
}
268+
253269
// incrementalSummarizer takes a slice of messages and a summary, calls the LLM,
254270
// and returns a new summary enriched with the messages content. Summary can be
255271
// an empty string. Returns a string with the new summary and the number of
@@ -276,17 +292,7 @@ func incrementalSummarizer(
276292
MessagesJoined: messagesJoined,
277293
}
278294

279-
var summaryPromptTemplate string
280-
switch appState.Config.LLM.Service {
281-
case "openai":
282-
summaryPromptTemplate = summaryPromptTemplateOpenAI
283-
case "anthropic":
284-
summaryPromptTemplate = summaryPromptTemplateAnthropic
285-
default:
286-
return "", 0, fmt.Errorf("unknown LLM service: %s", appState.Config.LLM.Service)
287-
}
288-
289-
progressivePrompt, err := internal.ParsePrompt(summaryPromptTemplate, promptData)
295+
progressivePrompt, err := generateProgressiveSummarizerPrompt(appState, promptData)
290296
if err != nil {
291297
return "", 0, err
292298
}
@@ -309,3 +315,33 @@ func incrementalSummarizer(
309315

310316
return summary, tokensUsed, nil
311317
}
318+
319+
func generateProgressiveSummarizerPrompt(appState *models.AppState, promptData SummaryPromptTemplateData) (string, error) {
320+
customSummaryPromptTemplateAnthropic := appState.Config.CustomPrompts.SummarizerPrompts.Anthropic
321+
customSummaryPromptTemplateOpenAI := appState.Config.CustomPrompts.SummarizerPrompts.OpenAI
322+
323+
var summaryPromptTemplate string
324+
switch appState.Config.LLM.Service {
325+
case "openai":
326+
if customSummaryPromptTemplateOpenAI != "" {
327+
summaryPromptTemplate = customSummaryPromptTemplateOpenAI
328+
} else {
329+
summaryPromptTemplate = defaultSummaryPromptTemplateOpenAI
330+
}
331+
case "anthropic":
332+
if customSummaryPromptTemplateAnthropic != "" {
333+
summaryPromptTemplate = customSummaryPromptTemplateAnthropic
334+
} else {
335+
summaryPromptTemplate = defaultSummaryPromptTemplateAnthropic
336+
}
337+
default:
338+
return "", fmt.Errorf("unknown LLM service: %s", appState.Config.LLM.Service)
339+
}
340+
341+
err := validateSummarizerPrompt(summaryPromptTemplate)
342+
if err != nil {
343+
return "", err
344+
}
345+
346+
return internal.ParsePrompt(summaryPromptTemplate, promptData)
347+
}

pkg/extractors/summarizer_test.go

+104
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ package extractors
33
import (
44
"testing"
55

6+
"github.com/getzep/zep/config"
67
"github.com/getzep/zep/pkg/llms"
78
"github.com/getzep/zep/pkg/models"
89
"github.com/getzep/zep/pkg/testutils"
@@ -80,3 +81,106 @@ func TestSummarize_Anthropic(t *testing.T) {
8081
// Reset the config to the default
8182
appState.Config = testutils.NewTestConfig()
8283
}
84+
85+
func TestValidateSummarizerPrompt(t *testing.T) {
86+
testCases := []struct {
87+
name string
88+
prompt string
89+
wantErr bool
90+
}{
91+
{
92+
name: "valid prompt",
93+
prompt: "{{.PrevSummary}} {{.MessagesJoined}}",
94+
wantErr: false,
95+
},
96+
{
97+
name: "invalid prompt",
98+
prompt: "{{.PrevSummary}}",
99+
wantErr: true,
100+
},
101+
}
102+
103+
for _, tc := range testCases {
104+
t.Run(tc.name, func(t *testing.T) {
105+
err := validateSummarizerPrompt(tc.prompt)
106+
if tc.wantErr {
107+
assert.Error(t, err)
108+
} else {
109+
assert.NoError(t, err)
110+
}
111+
})
112+
}
113+
}
114+
115+
func TestGenerateProgressiveSummarizerPrompt(t *testing.T) {
116+
testCases := []struct {
117+
name string
118+
service string
119+
customPromptOpenAI string
120+
customPromptAnthropic string
121+
expectedPrompt string
122+
defaultPrompt bool
123+
}{
124+
{
125+
name: "OpenAI with custom prompt",
126+
service: "openai",
127+
customPromptOpenAI: "custom openai prompt {{.PrevSummary}} {{.MessagesJoined}}",
128+
customPromptAnthropic: "",
129+
expectedPrompt: "custom openai prompt previous summary joined messages",
130+
},
131+
{
132+
name: "Anthropic with custom prompt",
133+
service: "anthropic",
134+
customPromptOpenAI: "",
135+
customPromptAnthropic: "custom anthropic prompt {{.PrevSummary}} {{.MessagesJoined}}",
136+
expectedPrompt: "custom anthropic prompt previous summary joined messages",
137+
},
138+
{
139+
name: "OpenAI without custom prompt",
140+
service: "openai",
141+
customPromptOpenAI: "",
142+
customPromptAnthropic: "",
143+
expectedPrompt: defaultSummaryPromptTemplateOpenAI,
144+
defaultPrompt: true,
145+
},
146+
{
147+
name: "Anthropic without custom prompt",
148+
service: "anthropic",
149+
customPromptOpenAI: "",
150+
customPromptAnthropic: "",
151+
expectedPrompt: defaultSummaryPromptTemplateAnthropic,
152+
defaultPrompt: true,
153+
},
154+
}
155+
156+
for _, tc := range testCases {
157+
t.Run(tc.name, func(t *testing.T) {
158+
appState := &models.AppState{
159+
Config: &config.Config{
160+
LLM: config.LLM{
161+
Service: tc.service,
162+
},
163+
CustomPrompts: config.CustomPromptsConfig{
164+
SummarizerPrompts: config.ExtractorPromptsConfig{
165+
OpenAI: tc.customPromptOpenAI,
166+
Anthropic: tc.customPromptAnthropic,
167+
},
168+
},
169+
},
170+
}
171+
promptData := SummaryPromptTemplateData{
172+
PrevSummary: "previous summary",
173+
MessagesJoined: "joined messages",
174+
}
175+
176+
prompt, err := generateProgressiveSummarizerPrompt(appState, promptData)
177+
assert.NoError(t, err)
178+
if !tc.defaultPrompt {
179+
assert.Equal(t, tc.expectedPrompt, prompt)
180+
} else {
181+
// Only compare the first 50 characters of the prompt, since the instructions should match
182+
assert.Equal(t, tc.expectedPrompt[:50], prompt[:50])
183+
}
184+
})
185+
}
186+
}

0 commit comments

Comments
 (0)