Skip to content

Commit c6e095d

Browse files
committed
feat: add simple chat gpt action
1 parent cca8837 commit c6e095d

File tree

12 files changed

+426
-25
lines changed

12 files changed

+426
-25
lines changed
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
# Manually-triggered workflow: sends a user-supplied prompt to the local
# ./simple-chat-gpt action and prints the generated message, both in the
# producing job and in a second job that consumes the job-level output.
name: Simple Chat GPT
run-name: Action started by ${{ github.actor }}

on:
  workflow_dispatch:
    inputs:
      prompt:
        description: 'Prompt'
        required: true
        type: string

jobs:
  simple_chat_gpt:
    name: Generate message
    runs-on: ubuntu-latest

    # Expose the generated message so downstream jobs can read it via `needs`.
    outputs:
      message: ${{ steps.chat.outputs.message }}

    steps:
      - uses: actions/checkout@v4

      - uses: ./simple-chat-gpt
        id: chat
        with:
          prompt: ${{ inputs.prompt }}
          openai_key: ${{ secrets.OPENAI_KEY }}

      - name: Print result message
        # Pass the model output through an environment variable instead of
        # interpolating `${{ }}` directly into the script: expanding untrusted
        # text inside `run` is a shell script-injection vector, and the
        # previous unquoted `echo` would also word-split / glob the message.
        env:
          MESSAGE: ${{ steps.chat.outputs.message }}
        run: |
          echo "$MESSAGE"
          echo "$MESSAGE" >> "$GITHUB_STEP_SUMMARY"

  print_message_from_other_job:
    name: Print message
    runs-on: ubuntu-latest
    needs: simple_chat_gpt
    steps:
      - name: Print message from other job
        # Same env-var indirection as above: never expand untrusted output
        # directly inside a shell command.
        env:
          MESSAGE: ${{ needs.simple_chat_gpt.outputs.message }}
        run: echo "$MESSAGE"

generate-enhanced-notes/README.md

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
## Generate Enhanced Notes using OpenAI GPT-3.5
1+
## Generate Enhanced Notes using OpenAI GPT
22

33
First create a secret in your repository called `OPENAI_KEY` with your OpenAI API key.
44
https://platform.openai.com/account/api-keys
@@ -61,13 +61,11 @@ jobs:
6161
---
6262
### Outputs
6363

64-
#### enhanced_notes
65-
The enhanced release notes.
66-
6764
#### release_notes
6865
The original release notes.
6966

70-
67+
#### enhanced_notes
68+
The enhanced release notes.
7169

7270
## TODO
7371
- [ ] Add support for multiple languages

generate-enhanced-notes/generate_enhanced_notes.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ const github = require('@actions/github');
99
const Logger = require('../utils/logger.js');
1010

1111
const GitHubService = require('../services/github_service.js');
12-
const GptService = require('../services/gpt_service.js');
12+
const GptService = require('../services/gpt/gpt_service.js');
1313

1414
class GenerateEnhancedNotes {
1515

package-lock.json

Lines changed: 55 additions & 16 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,11 +21,11 @@
2121
"author": "Emanuel Braz",
2222
"license": "MIT",
2323
"dependencies": {
24-
"@actions/core": "1.2.6",
24+
"@actions/core": "^1.10.1",
2525
"@actions/github": "^4.0.0",
2626
"axios": "^1.5.1",
2727
"changelog-parser": "2.8.0",
28-
"openai": "^4.10.0"
28+
"openai": "^4.11.0"
2929
},
3030
"devDependencies": {
3131
"conventional-changelog-cli": "2.2.2"
Lines changed: 161 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,161 @@
1+
/**
 * Value object mirroring the request body of the OpenAI Chat Completions
 * endpoint. It simply copies the recognized option keys from the supplied
 * options object onto the instance; unrecognized keys are ignored and
 * missing keys become `undefined` own properties.
 *
 * Key reference (see the OpenAI Chat API docs for full semantics):
 * - messages           List of conversation messages so far (required by the API).
 * - model              Model ID, e.g. 'gpt-4', 'gpt-3.5-turbo', 'gpt-4-32k-0613'.
 * - frequency_penalty  Number in [-2, 2]; penalizes frequent tokens.
 * - function_call      'none' | 'auto' | { name } forcing a specific function.
 * - functions          Functions the model may generate JSON arguments for.
 * - logit_bias         Map of token ID -> bias in [-100, 100].
 * - max_tokens         Cap on generated tokens (input + output bounded by context).
 * - n                  Number of completion choices per input message.
 * - presence_penalty   Number in [-2, 2]; encourages new topics.
 * - stop               Up to 4 stop sequences (string | string[] | null).
 * - stream             If true, server-sent-event streaming of deltas.
 * - temperature        Sampling temperature in [0, 2]; alternative to top_p.
 * - top_p              Nucleus-sampling probability mass; alternative to temperature.
 * - user               End-user identifier for abuse monitoring.
 */
class ChatCompletionParams {
  /** The option names this object carries, in property-creation order. */
  static OPTION_KEYS = Object.freeze([
    'messages',
    'model',
    'frequency_penalty',
    'function_call',
    'functions',
    'logit_bias',
    'max_tokens',
    'n',
    'presence_penalty',
    'stop',
    'stream',
    'temperature',
    'top_p',
    'user',
  ]);

  /**
   * @param {Object} options - Chat-completion options; only the keys listed
   *   in {@link ChatCompletionParams.OPTION_KEYS} are copied.
   */
  constructor(options) {
    for (const key of ChatCompletionParams.OPTION_KEYS) {
      this[key] = options[key];
    }
  }
}
160+
161+
module.exports = ChatCompletionParams;
Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,25 @@ const openai = require('openai');
99
class GptService {
1010

1111
constructor(apiKey) {
12-
this.apiKey = apiKey;
1312
console.log('Initializing OpenAI client...');
13+
14+
this.apiKey = apiKey;
1415
this.openaiClient = new openai.OpenAI({ apiKey: this.apiKey });
1516
}
1617

18+
async chatCompletions(chatCompletionParams) {
19+
20+
try {
21+
console.log('Generating chat completions...');
22+
23+
const response = await this.openaiClient.chat.completions.create(chatCompletionParams);
24+
return response;
25+
} catch (error) {
26+
console.error('[chatCompletions]', error);
27+
throw error;
28+
}
29+
}
30+
1731
async generateReleaseNotes(pullRequestTitles, prompt, maxTokens, model) {
1832
try {
1933

0 commit comments

Comments
 (0)