Skip to content

Commit

Permalink
chore: push old and merge missing ctx fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
Keyrxng committed Nov 3, 2024
1 parent bf67520 commit 9104945
Show file tree
Hide file tree
Showing 13 changed files with 138 additions and 187 deletions.
27 changes: 5 additions & 22 deletions .cspell.json
Original file line number Diff line number Diff line change
@@ -1,14 +1,7 @@
{
"$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json",
"version": "0.2",
"ignorePaths": [
"**/*.json",
"**/*.css",
"node_modules",
"**/*.log",
"./src/adapters/supabase/**/**.ts",
"/supabase/*"
],
"ignorePaths": ["**/*.json", "**/*.css", "node_modules", "**/*.log", "./src/adapters/supabase/**/**.ts", "/supabase/*"],
"useGitignore": true,
"language": "en",
"words": [
Expand Down Expand Up @@ -48,17 +41,7 @@
"mobileprovision",
"icns"
],
"dictionaries": [
"typescript",
"node",
"software-terms"
],
"import": [
"@cspell/dict-typescript/cspell-ext.json",
"@cspell/dict-node/cspell-ext.json",
"@cspell/dict-software-terms"
],
"ignoreRegExpList": [
"[0-9a-fA-F]{6}"
]
}
"dictionaries": ["typescript", "node", "software-terms"],
"import": ["@cspell/dict-typescript/cspell-ext.json", "@cspell/dict-node/cspell-ext.json", "@cspell/dict-software-terms"],
"ignoreRegExpList": ["[0-9a-fA-F]{6}"]
}
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -87,4 +87,4 @@
"@commitlint/config-conventional"
]
}
}
}
4 changes: 2 additions & 2 deletions src/adapters/openai/helpers/append-to-base-chat-history.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ import { createSystemMessage } from "./create-system-msg";
import { ChatHistory, CreationParams, ToolCallResponse } from "../types";

export function appendToConversation(params: CreationParams, toolCallsToAppend: ToolCallResponse[] = []): ChatHistory {
const { systemMessage, prompt, additionalContext, localContext, groundTruths, botName } = params;
const { systemMessage, query, additionalContext, localContext, groundTruths, botName } = params;
const baseChat: ChatHistory = [
{
role: "system",
Expand All @@ -18,7 +18,7 @@ export function appendToConversation(params: CreationParams, toolCallsToAppend:
content: [
{
type: "text",
text: prompt,
text: query,
},
],
},
Expand Down
87 changes: 21 additions & 66 deletions src/adapters/openai/helpers/completions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,20 +5,9 @@ import { logger } from "../../../helpers/errors";
import { appendToConversation } from "./append-to-base-chat-history";
import { getAnswerAndTokenUsage } from "./get-answer-and-token-usage";
import { CreationParams, ResponseFromLlm, ToolCallResponse } from "../types";
import { MAX_COMPLETION_TOKENS } from "../constants";
import { CompletionsModelHelper, ModelApplications } from "../../../types/llm";
import { encode } from "gpt-tokenizer";

export interface CompletionsType {
answer: string;
groundTruths: string[];
tokenUsage: {
input: number;
output: number;
total: number;
};
}

export class Completions extends SuperOpenAi {
protected context: Context;

Expand Down Expand Up @@ -63,73 +52,38 @@ export class Completions extends SuperOpenAi {
return this.getModelMaxTokenLimit("o1-mini");
}


async createCompletion(
{
query,
model,
params: {
systemMessage: string;
query: string;
model: string;
additionalContext: string[];
localContext: string[];
groundTruths: string[];
botName: string;
maxTokens: number;
},
chatHistory?: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
): Promise<ResponseFromLlm> {
const { query, model, additionalContext, localContext, groundTruths, botName, maxTokens } = params;
logger.info(`Creating completion for model: ${model} with query: ${query}`);
logger.info(`Context for completion:`, {
additionalContext,
localContext,
groundTruths,
botName,
maxTokens,
}: {
query: string,
model: string,
additionalContext: string[],
localContext: string[],
groundTruths: string[],
botName: string,
maxTokens: number
}
): Promise<CompletionsType> {
const numTokens = await this.findTokenLength(query, additionalContext, localContext, groundTruths);
logger.info(`Number of tokens: ${numTokens}`);

const sysMsg = [
"You Must obey the following ground truths: ",
JSON.stringify(groundTruths) + "\n",
"You are tasked with assisting as a GitHub bot by generating responses based on provided chat history and similar responses, focusing on using available knowledge within the provided corpus, which may contain code, documentation, or incomplete information. Your role is to interpret and use this knowledge effectively to answer user questions.\n\n# Steps\n\n1. **Understand Context**: Review the chat history and any similar provided responses to understand the context.\n2. **Extract Relevant Information**: Identify key pieces of information, even if they are incomplete, from the available corpus.\n3. **Apply Knowledge**: Use the extracted information and relevant documentation to construct an informed response.\n4. **Draft Response**: Compile the gathered insights into a coherent and concise response, ensuring it's clear and directly addresses the user's query.\n5. **Review and Refine**: Check for accuracy and completeness, filling any gaps with logical assumptions where necessary.\n\n# Output Format\n\n- Concise and coherent responses in paragraphs that directly address the user's question.\n- Incorporate inline code snippets or references from the documentation if relevant.\n\n# Examples\n\n**Example 1**\n\n*Input:*\n- Chat History: \"What was the original reason for moving the LP tokens?\"\n- Corpus Excerpts: \"It isn't clear to me if we redid the staking yet and if we should migrate. If so, perhaps we should make a new issue instead. We should investigate whether the missing LP tokens issue from the MasterChefV2.1 contract is critical to the decision of migrating or not.\"\n\n*Output:*\n\"It was due to missing LP tokens issue from the MasterChefV2.1 Contract.\n\n# Notes\n\n- Ensure the response is crafted from the corpus provided, without introducing information outside of what's available or relevant to the query.\n- Consider edge cases where the corpus might lack explicit answers, and justify responses with logical reasoning based on the existing information.",
`Your name is: ${botName}`,
"\n",
"Main Context (Provide additional precedence in terms of information): ",
localContext.join("\n"),
"Secondary Context: ",
additionalContext.join("\n"),
].join("\n");

logger.info(`System message: ${sysMsg}`);
logger.info(`Query: ${query}`);
});

const res: OpenAI.Chat.Completions.ChatCompletion = await this.client.chat.completions.create({
// tools: LLM_TOOLS, might not be a good idea to have this available for the general chatbot
model: model,
messages: [
{
role: "system",
content: [
{
type: "text",
text: sysMsg,
},
],
},
{
role: "user",
content: [
{
type: "text",
text: query,
},
],
},
],
messages: chatHistory || appendToConversation(params),
temperature: 0.2,
// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
// max_COMPLETION_tokens: MAX_COMPLETION_TOKENS,

/**An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens. */
max_completion_tokens: MAX_COMPLETION_TOKENS,
max_completion_tokens: maxTokens,
top_p: 0.5,
frequency_penalty: 0,
presence_penalty: 0,
Expand All @@ -144,7 +98,7 @@ export class Completions extends SuperOpenAi {
}

async handleFunctionCalling(res: OpenAI.Chat.Completions.ChatCompletion, params: CreationParams) {
const { systemMessage, prompt, model, additionalContext, localContext, groundTruths, botName } = params;
const { systemMessage, query, model, additionalContext, localContext, groundTruths, botName, maxTokens } = params;
if (res.choices[0].finish_reason === "function_call") {
const toolCalls = res.choices[0].message.tool_calls;
const choiceMessage = res.choices[0]["message"];
Expand Down Expand Up @@ -218,12 +172,13 @@ export class Completions extends SuperOpenAi {
return await this.createCompletion(
{
systemMessage,
prompt,
query,
model,
additionalContext,
localContext,
groundTruths,
botName,
maxTokens,
},
newChat
);
Expand Down
14 changes: 5 additions & 9 deletions src/adapters/openai/helpers/create-system-msg.ts
Original file line number Diff line number Diff line change
@@ -1,17 +1,13 @@
export function createSystemMessage(systemMessage: string, additionalContext: string[], localContext: string[], groundTruths: string[], botName: string) {
// safer to use array join than string concatenation
const parts = [
"You Must obey the following ground truths: [",
groundTruths.join(":"),
"]\n",
`You Must obey the following ground truths: ${JSON.stringify(groundTruths)}\n`,
systemMessage,
"Your name is : ",
botName,
"\n",
"Primary Context: ",
additionalContext.join("\n"),
"\nLocal Context: ",
`Your name is: ${botName}`,
"Main Context (Provide additional precedence in terms of information): ",
localContext.join("\n"),
"Secondary Context: ",
additionalContext.join("\n"),
];

return parts.join("\n");
Expand Down
2 changes: 1 addition & 1 deletion src/adapters/openai/helpers/prompts.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
export const DEFAULT_SYSTEM_MESSAGE = `You are tasked with assisting as a GitHub bot by generating responses based on provided chat history and similar responses, focusing on using available knowledge within the provided corpus, which may contain code, documentation, or incomplete information. Your role is to interpret and use this knowledge effectively to answer user questions.
export const CHATBOT_DEFAULT_SYSTEM_MESSAGE = `You are tasked with assisting as a GitHub bot by generating responses based on provided chat history and similar responses, focusing on using available knowledge within the provided corpus, which may contain code, documentation, or incomplete information. Your role is to interpret and use this knowledge effectively to answer user questions.
# Steps
Expand Down
6 changes: 4 additions & 2 deletions src/adapters/openai/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,22 +6,24 @@ export type TokenUsage = {
input: number;
output: number;
total: number;
outputDetails?: OpenAI.Completions.CompletionUsage.CompletionTokensDetails;
reasoning_tokens?: number;
};

/**
 * Shape of a completed LLM call as returned to callers.
 *
 * - `answer`: the model's generated answer text.
 * - `groundTruths`: the ground-truth statements that were injected into the
 *   system message for this completion (the prompt instructs the model to
 *   obey them), echoed back so callers can audit what constrained the answer.
 * - `tokenUsage`: token accounting for the request; see `TokenUsage`
 *   (input / output / total counts).
 */
export type ResponseFromLlm = {
answer: string;
groundTruths: string[];
tokenUsage: TokenUsage;
};

export type CreationParams = {
systemMessage: string;
prompt: string;
query: string;
model: string;
additionalContext: string[];
localContext: string[];
groundTruths: string[];
botName: string;
maxTokens: number;
};

export type ToolCallResponse = {
Expand Down
Loading

0 comments on commit 9104945

Please sign in to comment.