# More examples of using LLM via Inference class

Commit `2c2a88f` (parent `5411bf8`) - 16 changed files, 579 additions, 4 deletions.

---
title: 'Working directly with LLMs and JSON - JSON mode'
docname: 'llm_json'
---

## Overview

While working with the `Inference` class, you can also generate JSON output
from the model inference. This is useful, for example, when you need to
process the response in a structured way or when you want to store the
elements of the response in a database.

The `Inference` class supports multiple inference modes, such as `Tools`,
`Json`, `JsonSchema` or `MdJson`, which gives you the flexibility to choose
the best approach for your use case.

## Example

In this example we will use OpenAI JSON mode, which guarantees that the
response will be valid JSON.

It does not guarantee compliance with a specific schema (for some providers,
including OpenAI). We can try to work around this by providing an example of
the expected JSON output in the prompt.

> NOTE: Some model providers allow you to specify a JSON schema for the model
> to follow via the `schema` parameter of `response_format`. OpenAI does not
> support this feature in JSON mode (only in JSON Schema mode); a hypothetical
> fragment is sketched below.
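
For providers that do support it, the schema travels inside the response
format. A hypothetical fragment (not accepted by OpenAI in JSON mode; the
exact shape varies by provider, so check your provider's documentation):

```php
<?php
// Hypothetical: a provider that honors a `schema` key in `response_format`.
// OpenAI's JSON mode does not accept this - use JSON Schema mode there instead.
$responseFormat = [
    'type' => 'json_object',
    'schema' => [
        'type' => 'object',
        'properties' => [
            'name'       => ['type' => 'string'],
            'population' => ['type' => 'integer'],
            'founded'    => ['type' => 'integer'],
        ],
        'required' => ['name', 'population', 'founded'],
    ],
];
?>
```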

```php
<?php
$loader = require 'vendor/autoload.php';
$loader->add('Cognesy\\Instructor\\', __DIR__ . '/../../src/');

use Cognesy\Instructor\Enums\Mode;
use Cognesy\Instructor\Extras\LLM\Inference;

// regular API, allows customizing inference options
$data = (new Inference)
    ->withConnection('openai') // optional, default is set in /config/llm.php
    ->create(
        messages: [['role' => 'user', 'content' => 'What is the capital of France? '
            . 'Respond with JSON data containing name, population and year of founding. '
            . 'Example: {"name": "Berlin", "population": 3700000, "founded": 1237}']],
        responseFormat: [
            'type' => 'json_object',
        ],
        options: ['max_tokens' => 64],
        mode: Mode::Json,
    )
    ->toJson();

echo "USER: What is the capital of France?\n";
echo "ASSISTANT:\n";
dump($data);
?>
```
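
Since plain JSON mode does not enforce a schema, it is worth decoding and
checking the result before using it. A minimal sketch, assuming `toJson()`
returns the response as a JSON string (check the return type in your version
of the library):

```php
<?php
// Hardcoded stand-in for the JSON string produced by ->toJson() above,
// so the sketch runs standalone.
$json = '{"name": "Paris", "population": 2100000, "founded": 508}';

$city = json_decode($json, true, 512, JSON_THROW_ON_ERROR);

// JSON mode guarantees valid JSON, not a particular shape -
// verify the fields you rely on are actually present.
foreach (['name', 'population', 'founded'] as $field) {
    if (!array_key_exists($field, $city)) {
        throw new RuntimeException("Missing expected field: {$field}");
    }
}

echo "{$city['name']} has a population of {$city['population']}\n";
?>
```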

---
title: 'Working directly with LLMs and JSON - JSON Schema mode'
docname: 'llm_json_schema'
---

## Overview

While working with the `Inference` class, you can also generate JSON output
from the model inference. This is useful, for example, when you need to
process the response in a structured way or when you want to store the
elements of the response in a database.

## Example

In this example we will use OpenAI JSON Schema mode, which guarantees that
the response will be valid JSON matching the provided schema.

> NOTE: JSON Schema mode with guaranteed structured outputs is not
> supported by all language model providers.

```php
<?php
$loader = require 'vendor/autoload.php';
$loader->add('Cognesy\\Instructor\\', __DIR__ . '/../../src/');

use Cognesy\Instructor\Enums\Mode;
use Cognesy\Instructor\Extras\LLM\Inference;

// regular API, allows customizing inference options
$data = (new Inference)
    ->withConnection('openai')
    ->create(
        messages: [['role' => 'user', 'content' => 'What is the capital of France? '
            . 'Respond with JSON data.']],
        responseFormat: [
            'type' => 'json_schema',
            'description' => 'City data',
            'json_schema' => [
                'name' => 'city_data',
                'schema' => [
                    'type' => 'object',
                    'description' => 'City information',
                    'properties' => [
                        'name' => [
                            'type' => 'string',
                            'description' => 'City name',
                        ],
                        'founded' => [
                            'type' => 'integer',
                            'description' => 'Founding year',
                        ],
                        'population' => [
                            'type' => 'integer',
                            'description' => 'Current population',
                        ],
                    ],
                    'additionalProperties' => false,
                    'required' => ['name', 'founded', 'population'],
                ],
                'strict' => true,
            ],
        ],
        options: ['max_tokens' => 64],
        mode: Mode::JsonSchema,
    )
    ->toJson();

echo "USER: What is the capital of France?\n";
echo "ASSISTANT:\n";
dump($data);
?>
```
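
Because JSON Schema mode guarantees the structure, the decoded result can be
mapped straight onto a typed object. A minimal sketch (the `City` class is
illustrative, not part of the library; `toJson()` is assumed to return a JSON
string):

```php
<?php
// Illustrative DTO matching the schema declared above.
final class City {
    public function __construct(
        public string $name,
        public int $founded,
        public int $population,
    ) {}
}

// Hardcoded stand-in for the string returned by ->toJson() above.
$json = '{"name": "Paris", "founded": 508, "population": 2100000}';
$data = json_decode($json, true, 512, JSON_THROW_ON_ERROR);

$city = new City($data['name'], $data['founded'], $data['population']);
echo "{$city->name}: founded {$city->founded}, population {$city->population}\n";
?>
```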

---
title: 'Working directly with LLMs and JSON - MdJSON mode'
docname: 'llm_md_json'
---

## Overview

While working with the `Inference` class, you can also generate JSON output
from the model inference. This is useful, for example, when you need to
process the response in a structured way or when you want to store the
elements of the response in a database.

## Example

In this example we will use an emulation mode - MdJson - which tries to
force the model to generate JSON output by asking it to respond with a
JSON object inside a Markdown code block.

This is useful for models which do not support JSON output directly.

We will also provide an example of the expected JSON output in the prompt
to guide the model in generating the correct response.

```php
<?php
$loader = require 'vendor/autoload.php';
$loader->add('Cognesy\\Instructor\\', __DIR__ . '/../../src/');

use Cognesy\Instructor\Enums\Mode;
use Cognesy\Instructor\Extras\LLM\Inference;

// regular API, allows customizing inference options
$data = (new Inference)
    ->withConnection('openai')
    ->create(
        messages: [['role' => 'user', 'content' => 'What is the capital of France? '
            . 'Respond with JSON data containing name, population and year of founding. '
            . 'Example: {"name": "Berlin", "population": 3700000, "founded": 1237}']],
        options: ['max_tokens' => 64],
        mode: Mode::MdJson,
    )
    ->toJson();

echo "USER: What is the capital of France?\n";
echo "ASSISTANT:\n";
dump($data);
?>
```
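
An emulation mode like MdJson has to recover the JSON from the model's
Markdown-formatted answer. The sketch below illustrates that extraction step
(an assumption about the internals - the library's actual parsing may differ):

```php
<?php
// Hypothetical helper: pull the first JSON object out of a fenced
// Markdown code block, falling back to the raw text.
function extractJson(string $answer): string {
    if (preg_match('/```(?:json)?\s*(\{.*?\})\s*```/s', $answer, $m)) {
        return $m[1];
    }
    return trim($answer);
}

$answer = "Sure, here it is:\n```json\n{\"name\": \"Paris\", \"population\": 2100000}\n```";
echo extractJson($answer); // {"name": "Paris", "population": 2100000}
?>
```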

---
title: 'Working directly with LLMs and JSON - Tools mode'
docname: 'llm_tools'
---

## Overview

While working with the `Inference` class, you can also generate JSON output
from the model inference. This is useful, for example, when you need to
process the response in a structured way or when you want to store the
elements of the response in a database.

## Example

In this example we will use OpenAI tools mode, in which the model generates
JSON containing the arguments for a function call. This way we can make the
model produce a JSON object with a specific structure of parameters.

```php
<?php
$loader = require 'vendor/autoload.php';
$loader->add('Cognesy\\Instructor\\', __DIR__ . '/../../src/');

use Cognesy\Instructor\Enums\Mode;
use Cognesy\Instructor\Extras\LLM\Inference;

// regular API, allows customizing inference options
$data = (new Inference)
    ->withConnection('openai')
    ->create(
        messages: [['role' => 'user', 'content' => 'What is the capital of France? '
            . 'Respond with a function call.']],
        tools: [[
            'type' => 'function',
            'function' => [
                'name' => 'extract_data',
                'description' => 'Extract city data',
                'parameters' => [
                    'type' => 'object',
                    'description' => 'City information',
                    'properties' => [
                        'name' => [
                            'type' => 'string',
                            'description' => 'City name',
                        ],
                        'founded' => [
                            'type' => 'integer',
                            'description' => 'Founding year',
                        ],
                        'population' => [
                            'type' => 'integer',
                            'description' => 'Current population',
                        ],
                    ],
                    'required' => ['name', 'founded', 'population'],
                    'additionalProperties' => false,
                ],
            ],
        ]],
        toolChoice: [
            'type' => 'function',
            'function' => [
                'name' => 'extract_data',
            ],
        ],
        options: ['max_tokens' => 64],
        mode: Mode::Tools,
    )
    ->toJson();

echo "USER: What is the capital of France?\n";
echo "ASSISTANT:\n";
dump($data);
?>
```
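
For reference, OpenAI returns the generated arguments as a JSON string nested
in the tool call of the raw chat completion
(`choices[0].message.tool_calls[0].function.arguments`); the example above
assumes `toJson()` surfaces that string. Decoding it then follows the usual
pattern:

```php
<?php
// Hardcoded stand-in for the tool-call arguments produced above.
$arguments = '{"name": "Paris", "founded": 508, "population": 2100000}';

$city = json_decode($arguments, true, 512, JSON_THROW_ON_ERROR);
printf("%s - founded %d, population %d\n",
    $city['name'], $city['founded'], $city['population']);
?>
```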