25 changes: 25 additions & 0 deletions README.md
@@ -11,6 +11,31 @@ https://github.com/vanna-ai/vanna/assets/7146154/1901f47a-515d-4982-af50-f12761a

![vanna-quadrants](https://github.com/vanna-ai/vanna/assets/7146154/1c7c88ba-c144-4ecf-a028-cf5ba7344ca2)

## Quick Start (No API Key Required)
If you want to try Vanna instantly, you can use the built-in mock LLM and vector database. This requires no API keys or setup:

```python
from vanna.mock.llm import MockLLM
from vanna.mock.vectordb import MockVectorDB

class MyVanna(MockVectorDB, MockLLM):
    pass

vn = MyVanna()

vn.train(ddl="""
CREATE TABLE IF NOT EXISTS customers (
    id INT PRIMARY KEY,
    name VARCHAR(100),
    age INT
)
""")

print(vn.ask("Show all customers over 30"))
```

This prints a canned mock response in place of real SQL. For real use, see the sections below for connecting to an actual LLM and vector database.
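
For comparison, a real setup swaps the mocks for an actual LLM and vector store. The snippet below is only a hypothetical sketch: it assumes the OpenAI and ChromaDB integrations are installed, and the `OpenAI_Chat` / `ChromaDB_VectorStore` class names, module paths, and config keys shown here may differ between Vanna versions.

```python
# Hypothetical non-mock setup; class names, module paths, and config keys
# are assumptions and may differ between Vanna versions.
from vanna.openai.openai_chat import OpenAI_Chat
from vanna.chromadb.chromadb_vector import ChromaDB_VectorStore


class MyVanna(ChromaDB_VectorStore, OpenAI_Chat):
    def __init__(self, config=None):
        ChromaDB_VectorStore.__init__(self, config=config)
        OpenAI_Chat.__init__(self, config=config)


vn = MyVanna(config={"api_key": "sk-...", "model": "gpt-4"})
vn.train(ddl="CREATE TABLE IF NOT EXISTS customers (id INT PRIMARY KEY, name VARCHAR(100), age INT)")
print(vn.ask("Show all customers over 30"))
```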

## How Vanna works

![Screen Recording 2024-01-24 at 11 21 37 AM](https://github.com/vanna-ai/vanna/assets/7146154/1d2718ad-12a8-4a76-afa2-c61754462f93)
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -25,6 +25,9 @@ dependencies = [
"Homepage" = "https://github.com/vanna-ai/vanna"
"Bug Tracker" = "https://github.com/vanna-ai/vanna/issues"

[project.scripts]
vanna = "vanna.cli:main"

[project.optional-dependencies]
postgres = ["psycopg2-binary", "db-dtypes"]
mysql = ["PyMySQL"]
47 changes: 47 additions & 0 deletions src/vanna/cli.py
@@ -0,0 +1,47 @@
import argparse

from vanna.mock.llm import MockLLM
from vanna.mock.vectordb import MockVectorDB


class MyVanna(MockVectorDB, MockLLM):
    """A demo Vanna instance using mock LLM and vector DB for CLI usage."""
    pass


def main() -> None:
    parser = argparse.ArgumentParser(description="Vanna CLI - Generate SQL from natural language.")
    subparsers = parser.add_subparsers(dest="command", required=True)

    # Train command
    train_parser = subparsers.add_parser("train", help="Train Vanna with DDL or SQL.")
    train_parser.add_argument("--ddl", type=str, help="DDL statement to train with.")
    train_parser.add_argument("--sql", type=str, help="SQL statement to train with.")

    # Ask command
    ask_parser = subparsers.add_parser("ask", help="Ask a question to generate SQL.")
    ask_parser.add_argument("question", type=str, help="The natural language question.")

    # Info command
    subparsers.add_parser("info", help="Show info about the Vanna instance.")

    args = parser.parse_args()
    vn = MyVanna()

    if args.command == "train":
        if args.ddl:
            vn.train(ddl=args.ddl)
            print("Trained with DDL.")
        if args.sql:
            vn.train(sql=args.sql)
            print("Trained with SQL.")
        if not args.ddl and not args.sql:
            print("Please provide --ddl or --sql.")
    elif args.command == "ask":
        result = vn.ask(args.question)
        print(f"SQL:\n{result}")
    elif args.command == "info":
        print("Vanna CLI demo instance (mock LLM + mock vector DB)")


if __name__ == "__main__":
    main()
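
The dispatch logic above can be exercised in-process without installing the console script. The sketch below assumes the package is importable (so `from vanna.cli import main` works) and overrides `sys.argv` to mimic running `vanna train ...` and `vanna ask ...` from a shell:

```python
# In-process sketch of the CLI above; assumes `vanna.cli` is importable.
# Overriding sys.argv mimics the shell commands `vanna train --ddl ...`
# and `vanna ask ...` that the [project.scripts] entry point exposes.
import sys

from vanna.cli import main

sys.argv = ["vanna", "train", "--ddl", "CREATE TABLE customers (id INT PRIMARY KEY)"]
main()  # prints "Trained with DDL."

sys.argv = ["vanna", "ask", "Show all customers over 30"]
main()  # prints "SQL:" followed by the mock response
```

Note that each invocation constructs a fresh in-memory mock instance, so training from one command is not visible to the next.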
47 changes: 42 additions & 5 deletions src/vanna/mock/llm.py
@@ -1,19 +1,56 @@

+from typing import Any, Dict, Optional
from ..base import VannaBase


class MockLLM(VannaBase):
-    def __init__(self, config=None):
+    """
+    A mock implementation of an LLM for testing and demonstration purposes.
+    Returns canned responses and does not require any external API keys or services.
+    """
+    def __init__(self, config: Optional[dict] = None) -> None:
+        """
+        Initialize the mock LLM. Config is ignored.
+        """
        pass

-    def system_message(self, message: str) -> any:
+    def system_message(self, message: str) -> Dict[str, str]:
+        """
+        Create a mock system message.
+        Args:
+            message (str): The system message content.
+        Returns:
+            dict: A dictionary representing the system message.
+        """
        return {"role": "system", "content": message}

-    def user_message(self, message: str) -> any:
+    def user_message(self, message: str) -> Dict[str, str]:
+        """
+        Create a mock user message.
+        Args:
+            message (str): The user message content.
+        Returns:
+            dict: A dictionary representing the user message.
+        """
        return {"role": "user", "content": message}

-    def assistant_message(self, message: str) -> any:
+    def assistant_message(self, message: str) -> Dict[str, str]:
+        """
+        Create a mock assistant message.
+        Args:
+            message (str): The assistant message content.
+        Returns:
+            dict: A dictionary representing the assistant message.
+        """
        return {"role": "assistant", "content": message}

-    def submit_prompt(self, prompt, **kwargs) -> str:
+    def submit_prompt(self, prompt: str, **kwargs: Any) -> str:
+        """
+        Return a canned response for any prompt.
+        Args:
+            prompt (str): The prompt to submit.
+            **kwargs: Additional keyword arguments (ignored).
+        Returns:
+            str: A mock LLM response string.
+        """
        return "Mock LLM response"