diff --git a/.github/workflows/lint_test.yml b/.github/workflows/lint_test.yml
index 79fe7ed5..9b2f0c94 100644
--- a/.github/workflows/lint_test.yml
+++ b/.github/workflows/lint_test.yml
@@ -31,7 +31,7 @@ jobs:
       - name: ruff
         run: ruff sgpt tests scripts
       - name: mypy
-        run: mypy sgpt
+        run: mypy sgpt --exclude function.py --exclude handler.py --exclude default_functions
       - name: unittests
         run: |
           export OPENAI_API_KEY=test_api_key
diff --git a/README.md b/README.md
index 43a2d44a..50b7fccc 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 # ShellGPT
 A command-line productivity tool powered by AI large language models (LLM). As developers, we can leverage AI capabilities to generate shell commands, code snippets, comments, and documentation, among other things. Forget about cheat sheets and notes, with this tool you can get accurate answers right in your terminal, and you'll probably find yourself reducing your daily Google searches, saving you valuable time and effort. ShellGPT is cross-platform compatible and supports all major operating systems, including Linux, macOS, and Windows with all major shells, such as PowerShell, CMD, Bash, Zsh, Fish, and many others.
-https://github-production-user-asset-6210df.s3.amazonaws.com/16740832/291779848-66392282-474e-4a84-8482-d20c53c8727d.mp4
+https://github.com/TheR1D/shell_gpt/assets/16740832/721ddb19-97e7-428f-a0ee-107d027ddd59
 
 ## Installation
 ```shell
@@ -277,6 +277,69 @@ It is a Python script that uses the random module to generate and print a random
 ```
 It is also possible to pickup conversations from chat sessions (which were created using `--chat` option) and continue them in REPL mode.
 
+### Function calling
+[Function calling](https://platform.openai.com/docs/guides/function-calling) is a powerful feature OpenAI provides. It allows the LLM to execute functions on your system, which can be used to accomplish a variety of tasks.
+
+To install [default functions](https://github.com/TheR1D/shell_gpt/tree/main/sgpt/default_functions/), run:
+```shell
+sgpt --install-functions
+```
+
+ShellGPT has a convenient way to define functions and use them. To create your custom function, navigate to `~/.config/shell_gpt/functions` and create a new `.py` file named after the function. Inside this file, you can define your function using the following syntax:
+```python
+# execute_shell_command.py
+import subprocess
+from pydantic import Field
+from instructor import OpenAISchema
+
+
+class Function(OpenAISchema):
+    """
+    Executes a shell command and returns the output (result).
+    """
+    shell_command: str = Field(..., example="ls -la", description="Shell command to execute.")
+
+    class Config:
+        title = "execute_shell_command"
+
+    @classmethod
+    def execute(cls, shell_command: str) -> str:
+        result = subprocess.run(shell_command.split(), capture_output=True, text=True)
+        return f"Exit code: {result.returncode}, Output:\n{result.stdout}"
+```
+
+The docstring comment inside the class will be passed to the OpenAI API as the description of the function, along with the `title` attribute and the parameter descriptions. The `execute` method will be called if the LLM decides to use your function. In this case we are allowing the LLM to execute any shell commands on our system. Since we are returning the output of the command, the LLM will be able to analyze it and decide whether it fits the prompt. Here is an example of how the function might be executed by the LLM:
+```shell
+sgpt "What are the files in /tmp folder?"
+# -> @FunctionCall execute_shell_command(shell_command="ls /tmp")
+# -> The /tmp folder contains the following files and directories:
+# -> test.txt
+# -> test.json
+# ...
+```
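+
+By default, only the `@FunctionCall` line is printed. If `SHOW_FUNCTIONS_OUTPUT=true` is set in the runtime configuration file (see below), ShellGPT also prints the raw result each function returns, which is handy when debugging your own functions. A rough sketch of what this looks like (the exact output will vary):
+```shell
+sgpt "What are the files in /tmp folder?"
+# -> @FunctionCall execute_shell_command(shell_command="ls /tmp")
+# -> Exit code: 0, Output:
+# -> test.txt
+# -> test.json
+# -> The /tmp folder contains test.txt and test.json.
+```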
+
+Note that if the function (`execute_shell_command`) returns an error for some reason, the LLM might still try to accomplish the task based on that output. Let's say we don't have `jq` installed on our system, and we ask the LLM to parse a JSON file:
+```shell
+sgpt "parse /tmp/test.json file using jq and return only email value"
+# -> @FunctionCall execute_shell_command(shell_command="jq -r '.email' /tmp/test.json")
+# -> It appears that jq is not installed on the system. Let me try to install it using brew.
+# -> @FunctionCall execute_shell_command(shell_command="brew install jq")
+# -> jq has been successfully installed. Let me try to parse the file again.
+# -> @FunctionCall execute_shell_command(shell_command="jq -r '.email' /tmp/test.json")
+# -> The email value in /tmp/test.json is johndoe@example.
+```
+
+It is also possible to chain multiple function calls in the prompt:
+```shell
+sgpt "Play music and open hacker news"
+# -> @FunctionCall play_music()
+# -> @FunctionCall open_url(url="https://news.ycombinator.com")
+# -> Music is now playing, and Hacker News has been opened in your browser. Enjoy!
+```
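+
+The `play_music` and `open_url` calls above are examples of custom functions rather than part of the default set. For illustration, a minimal `open_url` function could look like this, following the same pattern as `execute_shell_command` (a sketch; the file name and field are assumptions):
+```python
+# open_url.py
+import webbrowser
+
+from instructor import OpenAISchema
+from pydantic import Field
+
+
+class Function(OpenAISchema):
+    """
+    Opens a URL in the default browser.
+    """
+
+    url: str = Field(..., example="https://news.ycombinator.com", description="URL to open.")
+
+    class Config:
+        title = "open_url"
+
+    @classmethod
+    def execute(cls, url: str) -> str:
+        # webbrowser is part of the standard library and works cross-platform.
+        webbrowser.open(url)
+        return f"Opened {url} in the default browser."
+```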
+
+This is just a simple example of how you can use function calls. It is truly a powerful feature that can be used to accomplish a variety of complex tasks. We have a dedicated [category](https://github.com/TheR1D/shell_gpt/discussions/categories/functions) in GitHub Discussions for sharing and discussing functions.
+The LLM might execute destructive commands, so please use this feature at your own risk❗️
+
 ### Roles
 ShellGPT allows you to create custom roles, which can be utilized to generate code, shell commands, or to fulfill your specific needs. To create a new role, use the `--create-role` option followed by the role name. You will be prompted to provide a description for the role, along with other details. This will create a JSON file in `~/.config/shell_gpt/roles` with the role name. Inside this directory, you can also edit default `sgpt` roles, such as **shell**, **code**, and **default**. Use the `--list-roles` option to list all available roles, and the `--show-role` option to display the details of a specific role. Here's an example of a custom role:
 ```shell
@@ -333,40 +396,48 @@ DEFAULT_EXECUTE_SHELL_CMD=false
 DISABLE_STREAMING=false
 # The pygment theme to view markdown (default/describe role).
 CODE_THEME=default
+# Path to a directory with functions.
+OPENAI_FUNCTIONS_PATH=/Users/user/.config/shell_gpt/functions
+# Print output of functions when LLM uses them.
+SHOW_FUNCTIONS_OUTPUT=false
+# Allows LLM to use functions.
+OPENAI_USE_FUNCTIONS=true
 ```
 Possible options for `DEFAULT_COLOR`: black, red, green, yellow, blue, magenta, cyan, white, bright_black, bright_red, bright_green, bright_yellow, bright_blue, bright_magenta, bright_cyan, bright_white.
 Possible options for `CODE_THEME`: https://pygments.org/styles/
 
 ### Full list of arguments
 ```text
-╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────╮
-│   prompt      [PROMPT]  The prompt to generate completions for.                                              │
-╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────╮
-│ --model            TEXT                       OpenAI GPT model to use. [default: gpt-3.5-turbo]              │
-│ --temperature      FLOAT RANGE [0.0<=x<=2.0]  Randomness of generated output. [default: 0.0]                 │
-│ --top-probability  FLOAT RANGE [0.0<=x<=1.0]  Limits highest probable tokens (words). [default: 1.0]         │
-│ --editor                                      Open $EDITOR to provide a prompt. [default: no-editor]         │
-│ --cache                                       Cache completion results. [default: cache]                     │
-│ --help                                        Show this message and exit.                                    │
-╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-╭─ Assistance Options ────────────────────────────────────────────────────────────────────────────────────────╮
-│ --shell           -s          Generate and execute shell commands.                                           │
-│ --describe-shell  -d          Describe a shell command.                                                      │
-│ --code            --no-code   Generate only code. [default: no-code]                                         │
-╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-╭─ Chat Options ──────────────────────────────────────────────────────────────────────────────────────────────╮
-│ --chat        TEXT  Follow conversation with id, use "temp" for quick session. [default: None]               │
-│ --repl        TEXT  Start a REPL (Read–eval–print loop) session. [default: None]                             │
-│ --show-chat   TEXT  Show all messages from provided chat id. [default: None]                                 │
-│ --list-chats        List all existing chat ids. [default: no-list-chats]                                     │
-╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-╭─ Role Options ──────────────────────────────────────────────────────────────────────────────────────────────╮
-│ --role         TEXT  System role for GPT model. [default: None]                                              │
-│ --create-role  TEXT  Create role. [default: None]                                                            │
-│ --show-role    TEXT  Show role. [default: None]                                                              │
-│ --list-roles         List roles. [default: no-list-roles]                                                    │
-╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Arguments ──────────────────────────────────────────────────────────────────────────────────────────────╮
+│   prompt      [PROMPT]  The prompt to generate completions for.                                           │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Options ────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ --model            TEXT                       Large language model to use. [default: gpt-4-1106-preview]  │
+│ --temperature      FLOAT RANGE [0.0<=x<=2.0]  Randomness of generated output. [default: 0.0]              │
+│ --top-probability  FLOAT RANGE [0.0<=x<=1.0]  Limits highest probable tokens (words). [default: 1.0]      │
+│ --editor                                      Open $EDITOR to provide a prompt. [default: no-editor]      │
+│ --cache                                       Cache completion results. [default: cache]                  │
+│ --version                                     Show version.                                               │
+│ --help                                        Show this message and exit.                                 │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Assistance Options ─────────────────────────────────────────────────────────────────────────────────────╮
+│ --shell           -s               Generate and execute shell commands.                                   │
+│ --describe-shell  -d               Describe a shell command.                                              │
+│ --code            -c               Generate only code.                                                    │
+│ --functions       --no-functions   Allow function calls. [default: functions]                             │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Chat Options ───────────────────────────────────────────────────────────────────────────────────────────╮
+│ --chat        TEXT  Follow conversation with id, use "temp" for quick session. [default: None]            │
+│ --repl        TEXT  Start a REPL (Read–eval–print loop) session. [default: None]                          │
+│ --show-chat   TEXT  Show all messages from provided chat id. [default: None]                              │
+│ --list-chats  -lc   List all existing chat ids.                                                           │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+╭─ Role Options ───────────────────────────────────────────────────────────────────────────────────────────╮
+│ --role         TEXT  System role for GPT model. [default: None]                                           │
+│ --create-role  TEXT  Create role. [default: None]                                                         │
+│ --show-role    TEXT  Show role. [default: None]                                                           │
+│ --list-roles   -lr   List roles.                                                                          │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯
 ```
 
 ## LocalAI
diff --git a/pyproject.toml b/pyproject.toml
index 473aa9f7..c7714d4b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ dependencies = [
     "rich >= 13.1.0, < 14.0.0",
     "distro >= 1.8.0, < 2.0.0",
     "openai >= 1.6.1, < 2.0.0",
+    "instructor >= 0.4.5, < 1.0.0",
    'pyreadline3 >= 3.4.1, < 4.0.0; sys_platform == "win32"',
 ]
 
@@ -81,6 +82,7 @@ skip = "__init__.py"
 
 [tool.mypy]
 strict = true
+exclude = ["function.py", "handler.py", "default_functions"]
 
 [tool.ruff]
 select = [
diff --git a/sgpt/__version__.py b/sgpt/__version__.py
index 5c4105cd..6849410a 100644
--- a/sgpt/__version__.py
+++ b/sgpt/__version__.py
@@ -1 +1 @@
-__version__ = "1.0.1"
+__version__ = "1.1.0"
diff --git a/sgpt/app.py b/sgpt/app.py
index 65231d34..85241d01 100644
--- a/sgpt/app.py
+++ b/sgpt/app.py
@@ -7,6 +7,8 @@
 from click.types import Choice
 
 from sgpt.config import cfg
+from sgpt.default_functions.init_functions import install_functions as inst_funcs
+from sgpt.function import get_openai_schemas
 from sgpt.handlers.chat_handler import ChatHandler
 from sgpt.handlers.default_handler import DefaultHandler
 from sgpt.handlers.repl_handler import ReplHandler
@@ -57,9 +59,16 @@ def main(
     ),
     code: bool = typer.Option(
         False,
+        "--code",
+        "-c",
         help="Generate only code.",
         rich_help_panel="Assistance Options",
     ),
+    functions: bool = typer.Option(
+        cfg.get("OPENAI_USE_FUNCTIONS") == "true",
+        help="Allow function calls.",
+        rich_help_panel="Assistance Options",
+    ),
     editor: bool = typer.Option(
         False,
         help="Open $EDITOR to provide a prompt.",
@@ -92,6 +101,8 @@ def main(
     ),
     list_chats: bool = typer.Option(
         False,
+        "--list-chats",
+        "-lc",
         help="List all existing chat ids.",
         callback=ChatHandler.list_ids,
         rich_help_panel="Chat Options",
@@ -115,6 +126,8 @@ def main(
     ),
     list_roles: bool = typer.Option(
         False,
+        "--list-roles",
+        "-lr",
         help="List roles.",
         callback=SystemRole.list,
         rich_help_panel="Role Options",
@@ -125,6 +138,12 @@ def main(
         callback=install_shell_integration,
         hidden=True,  # Hiding since should be used only once.
     ),
+    install_functions: bool = typer.Option(
+        False,
+        help="Install default functions.",
+        callback=inst_funcs,
+        hidden=True,  # Hiding since should be used only once.
+    ),
 ) -> None:
     stdin_passed = not sys.stdin.isatty()
@@ -154,6 +173,8 @@ def main(
         else SystemRole.get(role)
     )
 
+    function_schemas = (get_openai_schemas() or None) if functions else None
+
     if repl:
         # Will be in infinite loop here until user exits with Ctrl+C.
        ReplHandler(repl, role_class).handle(
@@ -163,6 +184,7 @@ def main(
             top_p=top_probability,
             chat_id=repl,
             caching=cache,
+            functions=function_schemas,
         )
 
     if chat:
@@ -173,6 +195,7 @@ def main(
             top_p=top_probability,
             chat_id=chat,
             caching=cache,
+            functions=function_schemas,
         )
     else:
         full_completion = DefaultHandler(role_class).handle(
@@ -181,6 +204,7 @@ def main(
             temperature=temperature,
             top_p=top_probability,
             caching=cache,
+            functions=function_schemas,
         )
 
     while shell and not stdin_passed:
@@ -201,6 +225,7 @@ def main(
             temperature=temperature,
             top_p=top_probability,
             caching=cache,
+            functions=function_schemas,
         )
         continue
         break
diff --git a/sgpt/cache.py b/sgpt/cache.py
index 55c07e9e..e0269157 100644
--- a/sgpt/cache.py
+++ b/sgpt/cache.py
@@ -39,7 +39,8 @@ def wrapper(*args: Any, **kwargs: Any) -> Generator[str, None, None]:
             for i in func(*args, **kwargs):
                 result += i
                 yield i
-            cache_file.write_text(result)
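+            # Do not cache responses that contain function calls,
+            # since the function results may differ between runs.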
+            if "@FunctionCall" not in result:
+                cache_file.write_text(result)
             self._delete_oldest_files(self.length)  # type: ignore
 
         return wrapper
diff --git a/sgpt/config.py b/sgpt/config.py
index 34072cdb..f7ebf34d 100644
--- a/sgpt/config.py
+++ b/sgpt/config.py
@@ -10,6 +10,7 @@
 SHELL_GPT_CONFIG_FOLDER = Path(CONFIG_FOLDER) / "shell_gpt"
 SHELL_GPT_CONFIG_PATH = SHELL_GPT_CONFIG_FOLDER / ".sgptrc"
 ROLE_STORAGE_PATH = SHELL_GPT_CONFIG_FOLDER / "roles"
+FUNCTIONS_PATH = SHELL_GPT_CONFIG_FOLDER / "functions"
 CHAT_CACHE_PATH = Path(gettempdir()) / "chat_cache"
 CACHE_PATH = Path(gettempdir()) / "cache"
 
@@ -28,6 +29,9 @@
     "DEFAULT_EXECUTE_SHELL_CMD": os.getenv("DEFAULT_EXECUTE_SHELL_CMD", "false"),
     "DISABLE_STREAMING": os.getenv("DISABLE_STREAMING", "false"),
     "CODE_THEME": os.getenv("CODE_THEME", "dracula"),
+    "OPENAI_FUNCTIONS_PATH": os.getenv("OPENAI_FUNCTIONS_PATH", str(FUNCTIONS_PATH)),
+    "OPENAI_USE_FUNCTIONS": os.getenv("OPENAI_USE_FUNCTIONS", "true"),
+    "SHOW_FUNCTIONS_OUTPUT": os.getenv("SHOW_FUNCTIONS_OUTPUT", "false"),
     # New features might add their own config variables here.
 }
diff --git a/sgpt/default_functions/__init__.py b/sgpt/default_functions/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/sgpt/default_functions/common/execute_shell.py b/sgpt/default_functions/common/execute_shell.py
new file mode 100644
index 00000000..55c626c6
--- /dev/null
+++ b/sgpt/default_functions/common/execute_shell.py
@@ -0,0 +1,28 @@
+import subprocess
+
+from instructor import OpenAISchema
+from pydantic import Field
+
+
+class Function(OpenAISchema):
+    """
+    Executes a shell command and returns the output (result).
+    """
+
+    shell_command: str = Field(
+        ...,
+        example="ls -la",
+        description="Shell command to execute.",
+    )
+
+    class Config:
+        title = "execute_shell_command"
+
+    @classmethod
+    def execute(cls, shell_command: str) -> str:
+        process = subprocess.Popen(
+            shell_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+        )
+        output, _ = process.communicate()
+        exit_code = process.returncode
+        return f"Exit code: {exit_code}, Output:\n{output.decode()}"
diff --git a/sgpt/default_functions/init_functions.py b/sgpt/default_functions/init_functions.py
new file mode 100644
index 00000000..d0aca1c8
--- /dev/null
+++ b/sgpt/default_functions/init_functions.py
@@ -0,0 +1,35 @@
+import os
+import platform
+import shutil
+from pathlib import Path
+from typing import Any
+
+from ..config import cfg
+from ..utils import option_callback
+
+FUNCTIONS_FOLDER = Path(cfg.get("OPENAI_FUNCTIONS_PATH"))
+
+
+@option_callback
+def install_functions(*_args: Any) -> None:
+    current_folder = os.path.dirname(os.path.abspath(__file__))
+    common_folder = Path(current_folder + "/common")
+    common_files = [Path(path) for path in common_folder.glob("*.py")]
+    print("Installing default functions...")
+
+    for file in common_files:
+        shutil.copy(file, FUNCTIONS_FOLDER, follow_symlinks=True)
+        print(f"Installed {FUNCTIONS_FOLDER}/{file.name}")
+
+    current_platform = platform.system()
+    if current_platform == "Linux":
+        print("Installing Linux functions...")
+    if current_platform == "Windows":
+        print("Installing Windows functions...")
+    if current_platform == "Darwin":
+        print("Installing Mac functions...")
+        mac_folder = Path(current_folder + "/mac")
+        mac_files = [Path(path) for path in mac_folder.glob("*.py")]
+        for file in mac_files:
+            shutil.copy(file, FUNCTIONS_FOLDER, follow_symlinks=True)
+            print(f"Installed {FUNCTIONS_FOLDER}/{file.name}")
diff --git a/sgpt/default_functions/mac/apple_script.py b/sgpt/default_functions/mac/apple_script.py
new file mode 100644
index 00000000..6d07b39b
--- /dev/null
+++ b/sgpt/default_functions/mac/apple_script.py
@@ -0,0 +1,33 @@
+import subprocess
+
+from instructor import OpenAISchema
+from pydantic import Field
+
+
+class Function(OpenAISchema):
+    """
+    Executes Apple Script on macOS and returns the output (result).
+    Can be used for actions like: draft (prepare) an email, show calendar events, create a note.
+    """
+
+    apple_script: str = Field(
+        ...,
+        example='tell application "Finder" to get the name of every disk',
+        description="Apple Script to execute.",
+    )
+
+    class Config:
+        title = "execute_apple_script"
+
+    @classmethod
+    def execute(cls, apple_script: str) -> str:
+        script_command = ["osascript", "-e", apple_script]
+        try:
+            process = subprocess.Popen(
+                script_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+            )
+            output, _ = process.communicate()
+            output = output.decode("utf-8").strip()
+            return f"Output: {output}"
+        except Exception as e:
+            return f"Error: {e}"
diff --git a/sgpt/function.py b/sgpt/function.py
new file mode 100644
index 00000000..9afe7f88
--- /dev/null
+++ b/sgpt/function.py
@@ -0,0 +1,62 @@
+import importlib.util
+import sys
+from abc import ABCMeta
+from pathlib import Path
+from typing import Any, Callable
+
+from .config import cfg
+
+
+class Function:
+    def __init__(self, path: str):
+        module = self._read(path)
+        self._function = module.Function.execute
+        self._openai_schema = module.Function.openai_schema
+        self._name = self._openai_schema["name"]
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def openai_schema(self) -> dict[str, Any]:
+        return self._openai_schema
+
+    @property
+    def execute(self) -> Callable[..., str]:
+        return self._function
+
+    @classmethod
+    def _read(cls, path: str) -> Any:
+        module_name = path.replace("/", ".").removesuffix(".py")
+        spec = importlib.util.spec_from_file_location(module_name, path)
+        module = importlib.util.module_from_spec(spec)
+        sys.modules[module_name] = module
+        spec.loader.exec_module(module)
+
+        if not isinstance(module.Function, ABCMeta):
+            raise TypeError(
+                f"Function {module_name} must be a subclass of pydantic.BaseModel"
+            )
+        if not hasattr(module.Function, "execute"):
+            raise TypeError(
+                f"Function {module_name} must have an 'execute' static method"
+            )
+
+        return module
+
+
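+# Every *.py file in the functions folder is loaded once at import time;
+# each file is expected to define a `Function` class like the examples above.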
"assistant", + "content": "", + "function_call": {"name": name, "arguments": arguments}, # type: ignore + } + ) + + if messages and messages[-1]["role"] == "assistant": + yield "\n" + + dict_args = json.loads(arguments) + joined_args = ", ".join(f'{k}="{v}"' for k, v in dict_args.items()) + yield f"> @FunctionCall `{name}({joined_args})` \n\n" + result = get_function(name)(**dict_args) + if self.show_functions_output: + yield f"```text\n{result}\n```\n" + messages.append({"role": "function", "content": result, "name": name}) + + # TODO: Fix MyPy typing errors. This modules is excluded from MyPy checks. @cache def get_completion(self, **kwargs: Any) -> Generator[str, None, None]: + func_call = {"name": None, "arguments": ""} + + is_shell_role = self.role.name == DefaultRoles.SHELL.value + is_code_role = self.role.name == DefaultRoles.CODE.value + is_dsc_shell_role = self.role.name == DefaultRoles.DESCRIBE_SHELL.value + if is_shell_role or is_code_role or is_dsc_shell_role: + kwargs["functions"] = None + if self.disable_stream: completion = self.client.chat.completions.create(**kwargs) - yield completion.choices[0].message.content + message = completion.choices[0].message + if completion.choices[0].finish_reason == "function_call": + name, arguments = ( + message.function_call.name, + message.function_call.arguments, + ) + yield from self.handle_function_call( + kwargs["messages"], name, arguments + ) + yield from self.get_completion(**kwargs, caching=False) + yield message.content or "" return for chunk in self.client.chat.completions.create(**kwargs, stream=True): - yield from chunk.choices[0].delta.content or "" + delta = chunk.choices[0].delta + if delta.function_call: + if delta.function_call.name: + func_call["name"] = delta.function_call.name + if delta.function_call.arguments: + func_call["arguments"] += delta.function_call.arguments + if chunk.choices[0].finish_reason == "function_call": + name, arguments = func_call["name"], func_call["arguments"] + yield from self.handle_function_call( + kwargs["messages"], name, arguments + ) + yield from self.get_completion(**kwargs, caching=False) + return + + yield delta.content or "" def handle(self, prompt: str, **kwargs: Any) -> str: - if self.role.name == "ShellGPT" or self.role.name == "Shell Command Descriptor": + default = DefaultRoles.DEFAULT.value + shell_descriptor = DefaultRoles.DESCRIBE_SHELL.value + if self.role.name == default or self.role.name == shell_descriptor: return self._handle_with_markdown(prompt, **kwargs) return self._handle_with_plain_text(prompt, **kwargs) diff --git a/tests/test_integration.py b/tests/test_integration.py index cd348b3f..513b913a 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -36,6 +36,8 @@ def setUpClass(cls): assert cfg.get("DISABLE_STREAMING") == "false" # ShellGPT optimised and tested with gpt-4 turbo. assert cfg.get("DEFAULT_MODEL") == "gpt-4-1106-preview" + # Make sure we will not call any functions. + assert cfg.get("OPENAI_USE_FUNCTIONS") == "false" @staticmethod def get_arguments(prompt, **kwargs): @@ -391,6 +393,7 @@ def test_model_option(self, mocked_get_completion): temperature=0.0, top_p=1.0, caching=False, + functions=None, ) assert result.exit_code == 0 @@ -491,3 +494,5 @@ def test_version(self): } result = runner.invoke(app, self.get_arguments(**dict_arguments), input="d\n") assert __version__ in result.stdout + + # TODO: Implement function call tests.