10 changes: 10 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,13 @@
## v0.8.29 - 2025-11-13

### Bug Fixes
- Fixed engine.py with latest changes

### Documentation
- Updated changelog

**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.8.28...v0.8.29

## v0.8.28 - 2025-11-13

### Changes
24 changes: 6 additions & 18 deletions intentkit/core/engine.py
@@ -167,7 +167,7 @@ async def select_model(
) -> BaseChatModel:
llm_params = {}
context = runtime.context
if context.search:
if context.search or agent.has_search():
if llm_model.info.supports_search:
if llm_model.info.provider == LLMProvider.OPENAI:
tools.append({"type": "web_search"})
@@ -454,30 +454,18 @@ async def stream_agent_raw(

# super mode
recursion_limit = 30
if re.search(r"@super\b", input_message) or user_message.super_mode:
if (
re.search(r"@super\b", input_message)
or user_message.super_mode
or agent.has_super()
):
recursion_limit = 300
# Remove @super from the message
input_message = re.sub(r"@super\b", "", input_message).strip()

# llm native search
search = user_message.search_mode if user_message.search_mode is not None else False
if re.search(r"@search\b", input_message) or re.search(r"@web\b", input_message):
search = True
if model.supports_search:
input_message = re.sub(
r"@search\b",
"(You have native search tool, you can use it to get more recent information)",
input_message,
).strip()
input_message = re.sub(
r"@web\b",
"(You have native search tool, you can use it to get more recent information)",
input_message,
).strip()
else:
search = False
input_message = re.sub(r"@search\b", "", input_message).strip()
input_message = re.sub(r"@web\b", "", input_message).strip()

# content to llm
messages = [
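The widened conditions in engine.py mean the agent itself can now switch these modes on, not only the incoming message: `@super` in the message, the message's `super_mode` flag, or a `@super` directive in the agent's own prompt fields (via `agent.has_super()`) all raise the recursion limit, and `select_model` applies the same idea to native search through `context.search or agent.has_search()`. Below is a minimal sketch of the super-mode resolution, assuming a plain boolean stand-in for `agent.has_super()`; it is an illustration of the logic in this hunk, not the actual IntentKit API.

```python
import re


def resolve_super_mode(
    input_message: str, super_mode_flag: bool, agent_has_super: bool
) -> tuple[int, str]:
    """Sketch of the widened super-mode check; agent_has_super stands in
    for agent.has_super(). Any of the three signals raises the limit."""
    recursion_limit = 30
    if re.search(r"@super\b", input_message) or super_mode_flag or agent_has_super:
        recursion_limit = 300
    # @super is stripped from the user message either way, as in the diff.
    return recursion_limit, re.sub(r"@super\b", "", input_message).strip()


# A prompt-level @super directive now raises the limit even when the user
# message carries neither the tag nor the flag.
print(resolve_super_mode("summarize my portfolio", False, agent_has_super=True))
# (300, 'summarize my portfolio')
```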
25 changes: 3 additions & 22 deletions intentkit/core/prompt.py
@@ -240,35 +240,19 @@ def agent_prompt(agent: Agent, agent_data: AgentData) -> str:


async def explain_prompt(message: str) -> str:
"""
Process message to replace @skill:*:* patterns with (call skill xxxxx) format.

Args:
message (str): The input message to process

Returns:
str: The processed message with @skill patterns replaced
"""
# Pattern to match @skill:category:config_name with word boundaries
pattern = r"@skill:([^:]+):([^\s]+)\b"

async def replace_skill_pattern(match):
category = match.group(1)
config_name = match.group(2)

# Get skill by category and config_name
skill = await Skill.get_by_config_name(category, config_name)

if skill:
return f"(call skill {skill.name})"
else:
# If skill not found, keep original pattern
return match.group(0)

# Find all matches
matches = list(re.finditer(pattern, message))

# Process matches in reverse order to maintain string positions
result = message
for match in reversed(matches):
replacement = await replace_skill_pattern(match)
@@ -362,9 +346,6 @@ async def build_entrypoint_prompt(agent: Agent, context: AgentContext) -> str |
elif entrypoint == AuthorType.TRIGGER.value:
entrypoint_prompt = "\n\n" + _build_autonomous_task_prompt(agent, context)

if entrypoint_prompt:
entrypoint_prompt = await explain_prompt(entrypoint_prompt)

return entrypoint_prompt


@@ -422,16 +403,17 @@ async def get_base_prompt():
async def formatted_prompt(
state: AgentState, runtime: Runtime[AgentContext]
) -> list[BaseMessage]:
# Get base prompt (with potential admin LLM skill control processing)
# Base prompt
final_system_prompt = await get_base_prompt()

context = runtime.context

# Add entrypoint prompt if applicable
entrypoint_prompt = await build_entrypoint_prompt(agent, context)
if entrypoint_prompt:
processed_entrypoint = await explain_prompt(entrypoint_prompt)
final_system_prompt = (
f"{final_system_prompt}## Entrypoint rules{entrypoint_prompt}\n\n"
f"{final_system_prompt}## Entrypoint rules{processed_entrypoint}\n\n"
)

# Add user info if user_id is a valid EVM wallet address
@@ -444,7 +426,6 @@ async def formatted_prompt(
final_system_prompt = f"{final_system_prompt}{internal_info}"

if agent.prompt_append:
# Find the system message in prompt_array and process it
for i, (role, content) in enumerate(prompt_array):
if role == "system":
processed_append = await explain_prompt(content)
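With this change, the entrypoint prompt is passed through `explain_prompt` inside `formatted_prompt`, right where it is stitched into the system prompt, instead of inside `build_entrypoint_prompt`. The mechanism itself is unchanged: `@skill:category:config_name` tags are resolved asynchronously and substituted back-to-front so earlier match offsets stay valid. A self-contained sketch of that pattern, with a stubbed lookup standing in for `Skill.get_by_config_name` (the stub and its sample data are hypothetical):

```python
import asyncio
import re


# Illustrative stand-in for Skill.get_by_config_name; the real lookup is async
# and resolves a configured skill to its canonical name.
async def fake_get_skill_name(category: str, config_name: str) -> str | None:
    known = {("web", "tavily"): "tavily_search"}
    return known.get((category, config_name))


async def explain_prompt_sketch(message: str) -> str:
    """Replace @skill:category:config_name with "(call skill <name>)".

    re.sub cannot await a coroutine, so matches are collected first and
    substituted in reverse order to keep earlier match positions valid.
    """
    pattern = r"@skill:([^:]+):([^\s]+)\b"
    result = message
    for match in reversed(list(re.finditer(pattern, message))):
        name = await fake_get_skill_name(match.group(1), match.group(2))
        replacement = f"(call skill {name})" if name else match.group(0)
        result = result[: match.start()] + replacement + result[match.end() :]
    return result


if __name__ == "__main__":
    text = "Use @skill:web:tavily for news; leave @skill:unknown:x untouched."
    print(asyncio.run(explain_prompt_sketch(text)))
    # Use (call skill tavily_search) for news; leave @skill:unknown:x untouched.
```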
26 changes: 26 additions & 0 deletions intentkit/models/agent.py
@@ -1197,6 +1197,32 @@ async def is_model_support_image(self) -> bool:
except Exception:
return False

def has_search(self) -> bool:
texts = [
self.prompt,
self.prompt_append,
self.purpose,
self.personality,
self.principles,
]
for t in texts:
if t and (re.search(r"@search\b", t) or re.search(r"@web\b", t)):
return True
return False

def has_super(self) -> bool:
texts = [
self.prompt,
self.prompt_append,
self.purpose,
self.personality,
self.principles,
]
for t in texts:
if t and re.search(r"@super\b", t):
return True
return False

def to_yaml(self) -> str:
"""
Dump the agent model to YAML format with field descriptions as comments.
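The new `has_search()` and `has_super()` helpers scan the agent's `prompt`, `prompt_append`, `purpose`, `personality`, and `principles` fields for `@search`/`@web` and `@super` directives. The `\b` word boundary keeps longer words from triggering them, as this standalone re-creation of the check illustrates (the field values here are made up for the example):

```python
import re


def has_directive(texts: list[str | None], patterns: tuple[str, ...]) -> bool:
    # Same shape as Agent.has_search()/has_super(): any non-empty field
    # matching any of the given directive patterns counts.
    return any(t and any(re.search(p, t) for p in patterns) for t in texts)


fields = [
    "You are a research assistant. @super",   # prompt
    None,                                      # prompt_append
    "Track market news with @web results.",   # purpose
    "Calm and precise.",                       # personality
    "Cite every source.",                      # principles
]

print(has_directive(fields, (r"@search\b", r"@web\b")))  # True  ("@web" in purpose)
print(has_directive(fields, (r"@super\b",)))             # True  ("@super" in prompt)
print(has_directive(["@superb idea"], (r"@super\b",)))   # False ("@superb" fails \b)
```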