Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add duckduckgo-search #405

Merged
merged 5 commits into from
Apr 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.
- `Login via url`: Use `/login <a token>$<something like https://provider.com/login>` to Login. The program posts the token to the interface to
retrieve configuration
information, [how to develop this](https://github.com/LlmKira/Openaibot/blob/81eddbff0f136697d5ad6e13ee1a7477b26624ed/app/components/credential.py#L20).
- `Login`: Use `/login https://<api endpoint>/v1$<api key>$<the model>` to login
- `Login`: Use `/login https://<api endpoint>/v1$<api key>$<the model>$<tool model such as gpt-3.5-turbo>` to login

### 🧀 Plugin Can Do More

Expand All @@ -97,6 +97,7 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.
| Discord | ✅ | ✅ | |
| Kook | ✅ | ✅ | Does not support `triggering by reply` |
| Slack | ✅ | ✅ | Does not support `triggering by reply` |
| Line | ❌ | | |
| QQ | ❌ | | |
| Wechat | ❌ | | |
| Twitter | ❌ | | |
Expand Down
1 change: 1 addition & 0 deletions app/middleware/llm_task.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ def pair_check(_messages):
new_list.append(_messages[i])
new_list.append(_messages[-1])
if isinstance(_messages[-1], AssistantMessage) and _messages[-1].tool_calls:
logger.warning("llm_task:the last AssistantMessage not paired, be careful")
new_list.extend(mock_tool_message(_messages[-1], "[On Queue]"))
return new_list

Expand Down
4 changes: 2 additions & 2 deletions app/receiver/function.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,10 +273,10 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
logger.debug(f"Read History:{history}")
continue_ = await logic.llm_continue(
context=f"History:{history},ToolCallResult:{run_status}",
condition="Would you like to continue a chat?",
condition="If there is still any action that needs to be performed",
default=False,
)
if continue_.continue_it:
if continue_.boolean:
logger.debug(
"ToolCall run out, resign a new request to request stop sign."
)
Expand Down
7 changes: 2 additions & 5 deletions app/sender/discord/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
# @Software: PyCharm
import base64
import binascii
import json
import random
from typing import List

Expand Down Expand Up @@ -36,6 +35,7 @@
is_empty_command,
uid_make,
save_credential,
dict2markdown,
)
from llmkira.openapi.trigger import get_trigger_loop
from ...components.credential import Credential, ProviderError
Expand Down Expand Up @@ -391,10 +391,7 @@ async def listen_env_command(ctx: crescent.Context, env_string: str):
"**🧊 Env parse failed...O_o**\n", separator="\n"
)
else:
text = formatting.format_text(
f"**🧊 Updated**\n" f"```json\n{json.dumps(env_map, indent=2)}```",
separator="\n",
)
text = convert(dict2markdown(env_map))
await ctx.respond(
ephemeral=True,
content=text,
Expand Down
7 changes: 2 additions & 5 deletions app/sender/kook/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
# @Author : sudoskys
# @File : __init__.py.py
# @Software: PyCharm
import json
import random
from typing import List

Expand Down Expand Up @@ -33,6 +32,7 @@
is_empty_command,
uid_make,
save_credential,
dict2markdown,
)
from llmkira.openapi.trigger import get_trigger_loop
from ...components.credential import ProviderError, Credential
Expand Down Expand Up @@ -396,10 +396,7 @@ async def listen_env_command(msg: Message, env_string: str):
"**🧊 Env parse failed...O_o**\n", separator="\n"
)
else:
text = formatting.format_text(
f"**🧊 Updated**\n" f"```json\n{json.dumps(env_map, indent=2)}```",
separator="\n",
)
text = convert(dict2markdown(env_map))
await msg.reply(
is_temp=True,
type=MessageTypes.KMD,
Expand Down
14 changes: 6 additions & 8 deletions app/sender/slack/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
# @Author : sudoskys
# @File : __init__.py.py
# @Software: PyCharm
import json
import time
from ssl import SSLContext
from typing import List
Expand All @@ -24,6 +23,7 @@
parse_command,
uid_make,
login,
dict2markdown,
)
from app.setting.slack import BotSetting
from llmkira.kv_manager.env import EnvManager
Expand Down Expand Up @@ -239,10 +239,12 @@ async def listen_login_command(ack: AsyncAck, respond: AsyncRespond, command):
async def listen_env_command(ack: AsyncAck, respond: AsyncRespond, command):
command: SlashCommand = SlashCommand.model_validate(command)
await ack()
_manager = EnvManager(user_id=uid_make(__sender__, command.user_id))
if not command.text:
return
env_map = await _manager.read_env()
text = convert(dict2markdown(env_map))
return await respond(text=text)
_arg = command.text
_manager = EnvManager(user_id=uid_make(__sender__, command.user_id))
try:
env_map = await _manager.set_env(
env_value=_arg, update=True, return_all=True
Expand All @@ -251,11 +253,7 @@ async def listen_env_command(ack: AsyncAck, respond: AsyncRespond, command):
logger.exception(f"[213562]env update failed {e}")
text = formatting.mbold("🧊 Failed")
else:
text = formatting.format_text(
formatting.mbold("🦴 Env Changed"),
formatting.mcode(json.dumps(env_map, indent=2)),
separator="\n",
)
text = convert(dict2markdown(env_map))
await respond(text=text)

@bot.command(command="/clear")
Expand Down
17 changes: 9 additions & 8 deletions app/sender/telegram/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
# @Author : sudoskys
# @File : __init__.py.py
# @Software: PyCharm
import json
from typing import Optional, Union, List

from loguru import logger
Expand All @@ -22,6 +21,7 @@
uid_make,
login,
TimerObjectContainer,
dict2markdown,
)
from app.setting.telegram import BotSetting
from llmkira.kv_manager.env import EnvManager
Expand Down Expand Up @@ -239,9 +239,14 @@ async def listen_login_command(message: types.Message):
@bot.message_handler(commands="env", chat_types=["private"])
async def listen_env_command(message: types.Message):
_cmd, _arg = parse_command(command=message.text)
if not _arg:
return None
_manager = EnvManager(user_id=uid_make(__sender__, message.from_user.id))
if not _arg:
env_map = await _manager.read_env()
return await bot.reply_to(
message,
text=convert(dict2markdown(env_map)),
parse_mode="MarkdownV2",
)
try:
env_map = await _manager.set_env(
env_value=_arg, update=True, return_all=True
Expand All @@ -252,11 +257,7 @@ async def listen_env_command(message: types.Message):
formatting.mbold("🧊 Failed"), separator="\n"
)
else:
text = formatting.format_text(
formatting.mbold("🦴 Env Changed"),
formatting.mcode(json.dumps(env_map, indent=2)),
separator="\n",
)
text = convert(dict2markdown(env_map))
await bot.reply_to(message, text=text, parse_mode="MarkdownV2")

@bot.message_handler(
Expand Down
7 changes: 7 additions & 0 deletions app/sender/util_func.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,3 +223,10 @@ def clear_objects(self, user_id):
"""
if user_id in self.users:
self.users[user_id] = {}


def dict2markdown(maps: dict) -> str:
    """Render a mapping as a Markdown bullet list under an Env header.

    :param maps: key/value pairs to display
    :return: Markdown text, one `- **key**: value` bullet per entry
    """
    bullets = [f"- **`{key}`**: `{value}`\n" for key, value in maps.items()]
    return "**🦴 Env**\n" + "".join(bullets)
10 changes: 6 additions & 4 deletions llmkira/extra/plugins/search/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from llmkira.sdk.tools.schema import FuncPair, BaseTool # noqa: E402
from llmkira.task import Task, TaskHeader # noqa: E402
from llmkira.task.schema import Location, ToolResponse, EventMessage # noqa: E402
from .engine import SerperSearchEngine, build_search_tips # noqa: E402
from .engine import SerperSearchEngine, build_search_tips, search_in_duckduckgo # noqa: E402


class Search(BaseModel):
Expand All @@ -26,7 +26,9 @@ class Search(BaseModel):


@resign_plugin_executor(tool=Search)
async def search_on_serper(search_sentence: str, api_key: str = None):
    """Search the web, preferring Serper when an API key is available.

    :param search_sentence: the query string
    :param api_key: Serper API key; when absent, fall back to DuckDuckGo
    :return: formatted search tips built from the result items
    """
    if not api_key:
        # BUG FIX: search_in_duckduckgo is a coroutine function — without
        # `await` the caller would receive an un-awaited coroutine object.
        results = await search_in_duckduckgo(search_sentence)
        # Build tips here too so both branches return the same shape.
        return build_search_tips(search_items=results)
    result = await SerperSearchEngine(api_key=api_key).search(search_sentence)
    return build_search_tips(search_items=result)

Expand Down Expand Up @@ -160,15 +162,15 @@ async def run(
_set = Search.model_validate(arg)
_search_result = await search_on_serper(
search_sentence=_set.keywords,
api_key=env.get("SERPER_API_KEY"),
api_key=env.get("SERPER_API_KEY", None),
)
# META
_meta = task.task_sign.reprocess(
plugin_name=__plugin_name__,
tool_response=[
ToolResponse(
name=__plugin_name__,
function_response=str(_search_result),
function_response=f"SearchData: {_search_result},Please give reference link when use it.",
tool_call_id=pending_task.id,
tool_call=pending_task,
)
Expand Down
23 changes: 23 additions & 0 deletions llmkira/extra/plugins/search/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from typing import List

import requests
from duckduckgo_search import AsyncDDGS
from loguru import logger
from pydantic import BaseModel

Expand Down Expand Up @@ -46,6 +47,28 @@ async def search(self, search_term: str) -> List[SearchEngineResult]:
return _result


async def search_in_duckduckgo(search_sentence: str):
    """Query DuckDuckGo and normalize hits into SearchEngineResult items.

    :param search_sentence: the query string
    :raises ValueError: when the DuckDuckGo backend errors out or is unreachable
    :return: list of SearchEngineResult (missing fields become "Undefined")
    """
    try:
        search_result = await AsyncDDGS().text(
            search_sentence, safesearch="off", timelimit="y", max_results=10
        )
    except Exception as e:
        # Chain the cause so the original DDG exception is preserved for debugging.
        raise ValueError(
            f"Search Failed: DuckDuckGo Error now not available: {type(e)}"
        ) from e
    else:
        _build_result = []
        for result in _iter_or_empty(search_result):
            _build_result.append(
                SearchEngineResult(
                    title=result.get("title", "Undefined"),
                    # BUG FIX: DDG result keys are lowercase; "Href" never
                    # matched, so every link came back as "Undefined".
                    link=result.get("href", "Undefined"),
                    snippet=result.get("body", "Undefined"),
                )
            )
        return _build_result


def _iter_or_empty(items):
    """Return *items* unchanged, or an empty list when it is None.

    AsyncDDGS().text may return None when there are no results; iterating
    None would raise TypeError, so guard here.
    """
    return items if items is not None else []


def build_search_tips(search_items: List[SearchEngineResult], limit=5):
search_tips = []
assert isinstance(
Expand Down
14 changes: 6 additions & 8 deletions llmkira/extra/voice/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import Optional

import aiohttp
import edge_tts
from gtts import gTTS
from loguru import logger


Expand Down Expand Up @@ -99,17 +99,15 @@ async def request_reecho_speech(
return None


async def request_edge_speech(text: str, voice: str = "en-GB-SoniaNeural"):
async def request_google_speech(text: str):
    """Synthesize *text* with Google TTS and return the MP3 audio bytes.

    :param text: the text to speak
    :return: audio bytes on success, None on failure (errors are logged)
    """
    try:
        buffer = BytesIO()
        speech = gTTS(text)
        speech.write_to_fp(buffer)
        buffer.seek(0)
        return buffer.getvalue()
    except Exception as e:
        logger.warning(f"google TTS Error: {e}")
        return None


Expand Down Expand Up @@ -170,4 +168,4 @@ async def request_en(text) -> Optional[bytes]:
if nai:
return nai
else:
return await request_edge_speech(text)
return await request_google_speech(text)
58 changes: 55 additions & 3 deletions llmkira/logic/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import Optional
from typing import Optional, Type

from loguru import logger
from pydantic import BaseModel, Field, SecretStr
Expand All @@ -14,9 +14,13 @@ class whether(BaseModel):

yes_no: bool = Field(description="Whether the condition is true or false")
comment_to_user: Optional[str] = Field(
default="", description="Comment on the decision"
default="", description="Comment on the decision in user language"
)

@property
def boolean(self):
return self.yes_no


class continue_act(BaseModel):
"""
Expand All @@ -25,9 +29,13 @@ class continue_act(BaseModel):

continue_it: bool = Field(description="Whether to continue execution")
comment_to_user: Optional[str] = Field(
default="", description="Comment on the decision"
default="", description="Comment on the decision in user language"
)

@property
def boolean(self):
return self.continue_it


class LLMLogic(object):
"""
Expand Down Expand Up @@ -77,3 +85,47 @@ async def llm_continue(self, context: str, condition: str, default: bool):
except Exception as e:
logger.error(f"llm_continue error: {e}")
return continue_act(continue_it=default)

async def deserialization(
    self, context: str, model: Type[BaseModel]
) -> Optional[BaseModel]:
    """Ask the LLM to extract an instance of *model* from free-form text.

    :param context: the raw text to parse
    :param model: target pydantic model class to populate
    :return: a populated model instance, or None when extraction fails
    """
    try:
        result = await OpenAI(
            model=self.api_model,
            messages=[UserMessage(content=context)],
        ).extract(
            response_model=model,
            session=OpenAICredential(
                api_key=SecretStr(self.api_key),
                base_url=self.api_endpoint,
                model=self.api_model,
            ),
        )
        return result
    except Exception as e:
        # BUG FIX: was mislabelled "serialization" — log under the correct
        # operation name so failures are distinguishable from serialization().
        logger.error(f"logic:deserialization error: {e}")
        return None

async def serialization(self, model: BaseModel) -> Optional[UserMessage]:
    """Round-trip *model* through the LLM, returning it as a UserMessage.

    :param model: the pydantic model to serialize
    :return: a UserMessage produced by the LLM, or None on failure
    """
    try:
        credential = OpenAICredential(
            api_key=SecretStr(self.api_key),
            base_url=self.api_endpoint,
            model=self.api_model,
        )
        request = OpenAI(
            model=self.api_model,
            messages=[UserMessage(content=model.model_dump_json())],
        )
        return await request.extract(
            response_model=UserMessage, session=credential
        )
    except Exception as e:
        logger.error(f"logic:serialization error: {e}")
        return None
Loading
Loading