From ad12ca63b0df1093d0446d31f2341d39e8f4d895 Mon Sep 17 00:00:00 2001 From: Ben McMorran Date: Mon, 21 Oct 2024 08:47:53 -0700 Subject: [PATCH 1/6] React to lmTools API breaking changes (#12866) --- Extension/package.json | 5 +---- Extension/src/LanguageServer/lmTool.ts | 9 ++------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/Extension/package.json b/Extension/package.json index d147a3cd35..182e2ccfc6 100644 --- a/Extension/package.json +++ b/Extension/package.json @@ -6487,10 +6487,7 @@ "userDescription": "%c_cpp.languageModelTools.configuration.userDescription%", "modelDescription": "For the active C or C++ file, this tool provides: the language (C, C++, or CUDA), the language standard version (such as C++11, C++14, C++17, or C++20), the compiler (such as GCC, Clang, or MSVC), the target platform (such as x86, x64, or ARM), and the target architecture (such as 32-bit or 64-bit).", "icon": "$(file-code)", - "when": "(config.C_Cpp.experimentalFeatures =~ /^[eE]nabled$/)", - "supportedContentTypes": [ - "text/plain" - ] + "when": "(config.C_Cpp.experimentalFeatures =~ /^[eE]nabled$/)" } ] }, diff --git a/Extension/src/LanguageServer/lmTool.ts b/Extension/src/LanguageServer/lmTool.ts index 5951377b4e..ed5be61a00 100644 --- a/Extension/src/LanguageServer/lmTool.ts +++ b/Extension/src/LanguageServer/lmTool.ts @@ -44,15 +44,10 @@ const knownValues: { [Property in keyof ChatContextResult]?: { [id: string]: str } }; -const plainTextContentType = 'text/plain'; - export class CppConfigurationLanguageModelTool implements vscode.LanguageModelTool { public async invoke(options: vscode.LanguageModelToolInvocationOptions, token: vscode.CancellationToken): Promise { - const result: vscode.LanguageModelToolResult = {}; - if (options.requestedContentTypes.includes(plainTextContentType)) { - result[plainTextContentType] = await this.getContext(token); - } - return result; + return new vscode.LanguageModelToolResult([ + new vscode.LanguageModelTextPart(await this.getContext(token))]); } private async getContext(token: vscode.CancellationToken): Promise { From a61c55ca8cbf07722ddc9b067b86adb6b5958f5c Mon Sep 17 00:00:00 2001 From: Colen Garoutte-Carson <49173979+Colengms@users.noreply.github.com> Date: Mon, 21 Oct 2024 12:45:35 -0700 Subject: [PATCH 2/6] Update changelog for 1.22.10 (#12852) --- Extension/CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Extension/CHANGELOG.md b/Extension/CHANGELOG.md index 71a2ad23eb..8891897fa2 100644 --- a/Extension/CHANGELOG.md +++ b/Extension/CHANGELOG.md @@ -1,5 +1,10 @@ # C/C++ for Visual Studio Code Changelog +## Version 1.22.10: October 21, 2024 +### Bug Fixes +* Fix the 'Extract to Function' feature not working. +* Fix the 'Go to Next/Prev Preprocessor Conditional' feature not working. + ## Version 1.22.9: October 10, 2024 ### Performance Improvements * Initialization performance improvements. 
[#12030](https://github.com/microsoft/vscode-cpptools/issues/12030) From ddb7a5502dbd66e5cfa38209cd3e785460758851 Mon Sep 17 00:00:00 2001 From: Ben McMorran Date: Mon, 21 Oct 2024 14:02:25 -0700 Subject: [PATCH 3/6] Fix E2E tests (#12871) --- .../tests/copilotProviders.test.ts | 30 +++++++++++-------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/Extension/test/scenarios/SingleRootProject/tests/copilotProviders.test.ts b/Extension/test/scenarios/SingleRootProject/tests/copilotProviders.test.ts index c06f438f8b..54052e122d 100644 --- a/Extension/test/scenarios/SingleRootProject/tests/copilotProviders.test.ts +++ b/Extension/test/scenarios/SingleRootProject/tests/copilotProviders.test.ts @@ -22,6 +22,12 @@ describe('registerRelatedFilesProvider', () => { let callbackPromise: Promise<{ entries: vscode.Uri[]; traits?: CopilotTrait[] }> | undefined; let vscodeExtension: vscode.Extension; + const includedFiles = process.platform === 'win32' ? + ['c:\\system\\include\\vector', 'c:\\system\\include\\string', 'C:\\src\\my_project\\foo.h'] : + ['/system/include/vector', '/system/include/string', '/home/src/my_project/foo.h']; + const rootUri = vscode.Uri.file(process.platform === 'win32' ? 'C:\\src\\my_project' : '/home/src/my_project'); + const expectedInclude = process.platform === 'win32' ? 'file:///c%3A/src/my_project/foo.h' : 'file:///home/src/my_project/foo.h'; + beforeEach(() => { proxyquire.noPreserveCache(); // Tells proxyquire to not fetch the module from cache // Ensures that each test has a freshly loaded instance of moduleUnderTest @@ -105,9 +111,9 @@ describe('registerRelatedFilesProvider', () => { it('should not add #cpp traits when ChatContext isn\'t available.', async () => { arrange({ vscodeExtension: vscodeExtension, - getIncludeFiles: { includedFiles: ['c:\\system\\include\\vector', 'c:\\system\\include\\string', 'C:\\src\\my_project\\foo.h'] }, + getIncludeFiles: { includedFiles }, chatContext: undefined, - rootUri: vscode.Uri.file('C:\\src\\my_project'), + rootUri, flags: { copilotcppTraits: true } }); await moduleUnderTest.registerRelatedFilesProvider(); @@ -120,14 +126,14 @@ describe('registerRelatedFilesProvider', () => { ok(callbackPromise, 'callbackPromise should be defined'); ok(result, 'result should be defined'); ok(result.entries.length === 1, 'result.entries should have 1 included file'); - ok(result.entries[0].toString() === 'file:///c%3A/src/my_project/foo.h', 'result.entries should have "file:///c%3A/src/my_project/foo.h"'); + ok(result.entries[0].toString() === expectedInclude, `result.entries should have "${expectedInclude}"`); ok(result.traits === undefined, 'result.traits should be undefined'); }); it('should not add #cpp traits when copilotcppTraits flag is false.', async () => { arrange({ vscodeExtension: vscodeExtension, - getIncludeFiles: { includedFiles: ['c:\\system\\include\\vector', 'c:\\system\\include\\string', 'C:\\src\\my_project\\foo.h'] }, + getIncludeFiles: { includedFiles }, chatContext: { language: 'c++', standardVersion: 'c++20', @@ -135,7 +141,7 @@ describe('registerRelatedFilesProvider', () => { targetPlatform: 'windows', targetArchitecture: 'x64' }, - rootUri: vscode.Uri.file('C:\\src\\my_project'), + rootUri, flags: { copilotcppTraits: false } }); await moduleUnderTest.registerRelatedFilesProvider(); @@ -148,14 +154,14 @@ describe('registerRelatedFilesProvider', () => { ok(callbackPromise, 'callbackPromise should be defined'); ok(result, 'result should be defined'); ok(result.entries.length === 1, 'result.entries should 
have 1 included file'); - ok(result.entries[0].toString() === 'file:///c%3A/src/my_project/foo.h', 'result.entries should have "file:///c%3A/src/my_project/foo.h"'); + ok(result.entries[0].toString() === expectedInclude, `result.entries should have "${expectedInclude}"`); ok(result.traits === undefined, 'result.traits should be undefined'); }); it('should add #cpp traits when copilotcppTraits flag is true.', async () => { arrange({ vscodeExtension: vscodeExtension, - getIncludeFiles: { includedFiles: ['c:\\system\\include\\vector', 'c:\\system\\include\\string', 'C:\\src\\my_project\\foo.h'] }, + getIncludeFiles: { includedFiles }, chatContext: { language: 'c++', standardVersion: 'c++20', @@ -163,7 +169,7 @@ describe('registerRelatedFilesProvider', () => { targetPlatform: 'windows', targetArchitecture: 'x64' }, - rootUri: vscode.Uri.file('C:\\src\\my_project'), + rootUri, flags: { copilotcppTraits: true } }); await moduleUnderTest.registerRelatedFilesProvider(); @@ -177,7 +183,7 @@ describe('registerRelatedFilesProvider', () => { ok(callbackPromise, 'callbackPromise should be defined'); ok(result, 'result should be defined'); ok(result.entries.length === 1, 'result.entries should have 1 included file'); - ok(result.entries[0].toString() === 'file:///c%3A/src/my_project/foo.h', 'result.entries should have "file:///c%3A/src/my_project/foo.h"'); + ok(result.entries[0].toString() === expectedInclude, `result.entries should have "${expectedInclude}"`); ok(result.traits, 'result.traits should be defined'); ok(result.traits.length === 5, 'result.traits should have 5 traits'); ok(result.traits[0].name === 'language', 'result.traits[0].name should be "language"'); @@ -206,7 +212,7 @@ describe('registerRelatedFilesProvider', () => { const excludeTraits = ['compiler', 'targetPlatform']; arrange({ vscodeExtension: vscodeExtension, - getIncludeFiles: { includedFiles: ['c:\\system\\include\\vector', 'c:\\system\\include\\string', 'C:\\src\\my_project\\foo.h'] }, + getIncludeFiles: { includedFiles }, chatContext: { language: 'c++', standardVersion: 'c++20', @@ -214,7 +220,7 @@ describe('registerRelatedFilesProvider', () => { targetPlatform: 'windows', targetArchitecture: 'x64' }, - rootUri: vscode.Uri.file('C:\\src\\my_project'), + rootUri, flags: { copilotcppTraits: true, copilotcppExcludeTraits: excludeTraits } }); await moduleUnderTest.registerRelatedFilesProvider(); @@ -228,7 +234,7 @@ describe('registerRelatedFilesProvider', () => { ok(callbackPromise, 'callbackPromise should be defined'); ok(result, 'result should be defined'); ok(result.entries.length === 1, 'result.entries should have 1 included file'); - ok(result.entries[0].toString() === 'file:///c%3A/src/my_project/foo.h', 'result.entries should have "file:///c%3A/src/my_project/foo.h"'); + ok(result.entries[0].toString() === expectedInclude, `result.entries should have "${expectedInclude}"`); ok(result.traits, 'result.traits should be defined'); ok(result.traits.length === 3, 'result.traits should have 3 traits'); ok(result.traits.filter(trait => excludeTraits.includes(trait.name)).length === 0, 'result.traits should not include excluded traits'); From a248d107da06b0d1fd3fc24cdd3172c3ce2c8c4d Mon Sep 17 00:00:00 2001 From: Ben McMorran Date: Mon, 21 Oct 2024 15:20:07 -0700 Subject: [PATCH 4/6] Check-in lmTools API to avoid build breaks (#12872) --- Extension/.gitignore | 4 + Extension/package.json | 2 +- Extension/vscode.proposed.lmTools.d.ts | 407 +++++++++++++++++++++++++ 3 files changed, 412 insertions(+), 1 deletion(-) create mode 100644 
Extension/vscode.proposed.lmTools.d.ts diff --git a/Extension/.gitignore b/Extension/.gitignore index 1adad30d07..06e718e788 100644 --- a/Extension/.gitignore +++ b/Extension/.gitignore @@ -35,3 +35,7 @@ src/nativeStrings.ts vscode*.d.ts .scripts/_* + +# The lmTools API is still changing frequently. We want to avoid spontaneous +# build breaks just because the upstream API changed in VS Code Insiders. +!vscode.proposed.lmTools.d.ts diff --git a/Extension/package.json b/Extension/package.json index 182e2ccfc6..bdc0895c0b 100644 --- a/Extension/package.json +++ b/Extension/package.json @@ -6512,7 +6512,7 @@ "translations-generate": "set NODE_OPTIONS=--no-experimental-fetch && gulp translations-generate", "translations-import": "gulp translations-import", "import-edge-strings": "ts-node -T ./.scripts/import_edge_strings.ts", - "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main)", + "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main && git checkout -- vscode.proposed.lmTools.d.ts)", "build": "yarn prep:dts && echo [Building TypeScript code] && tsc --build tsconfig.json" }, "devDependencies": { diff --git a/Extension/vscode.proposed.lmTools.d.ts b/Extension/vscode.proposed.lmTools.d.ts new file mode 100644 index 0000000000..2b40bd7b0b --- /dev/null +++ b/Extension/vscode.proposed.lmTools.d.ts @@ -0,0 +1,407 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for license information. + *--------------------------------------------------------------------------------------------*/ + +// version: 10 +// https://github.com/microsoft/vscode/issues/213274 + +declare module 'vscode' { + + export namespace lm { + /** + * Register a LanguageModelTool. The tool must also be registered in the package.json `languageModelTools` contribution + * point. A registered tool is available in the {@link lm.tools} list for any extension to see. But in order for it to + * be seen by a language model, it must be passed in the list of available tools in {@link LanguageModelChatRequestOptions.tools}. + */ + export function registerTool(name: string, tool: LanguageModelTool): Disposable; + + /** + * A list of all available tools that were registered by all extensions using {@link lm.registerTool}. They can be called + * with {@link lm.invokeTool} with a set of parameters that match their declared `parametersSchema`. + */ + export const tools: readonly LanguageModelToolInformation[]; + + /** + * Invoke a tool listed in {@link lm.tools} by name with the given parameters. + * + * The caller must pass a {@link LanguageModelToolInvocationOptions.toolInvocationToken}, which comes from + * {@link ChatRequest.toolInvocationToken} when the tool is being invoked by a by a {@link ChatParticipant}, and + * associates the invocation to a chat session. + * + * The tool will return a {@link LanguageModelToolResult} which contains an array of {@link LanguageModelTextPart} and + * {@link LanguageModelPromptTsxPart}. If the tool caller is using `@vscode/prompt-tsx`, it can incorporate the response + * parts into its prompt using a `ToolResult`. If not, the parts can be passed along to the {@link LanguageModelChat} via + * a User message with a {@link LanguageModelToolResultPart}. 
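// --- Illustrative aside (not part of the checked-in file): patch 1 above migrates
// CppConfigurationLanguageModelTool from the old requestedContentTypes dictionary to
// returning `new vscode.LanguageModelToolResult([new vscode.LanguageModelTextPart(...)])`.
// A minimal sketch of how a caller might consume that result shape through lm.invokeTool;
// the tool name and the empty parameters object are assumptions, not taken from this patch.
import * as vscode from 'vscode';

async function readToolText(toolName: string, token: vscode.CancellationToken): Promise<string> {
    const result = await vscode.lm.invokeTool(
        toolName,
        // Outside the context of a chat request, the invocation token must be undefined.
        { toolInvocationToken: undefined, parameters: {} },
        token);
    // A tool result is an ordered list of parts; keep only the plain-text ones.
    return result.content
        .filter((part): part is vscode.LanguageModelTextPart => part instanceof vscode.LanguageModelTextPart)
        .map(part => part.value)
        .join('');
}
// --- End aside.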
+ * + * If a chat participant wants to preserve tool results for requests across multiple turns, it can store tool results in + * the {@link ChatResult.metadata} returned from the handler and retrieve them on the next turn from + * {@link ChatResponseTurn.result}. + */ + export function invokeTool(name: string, options: LanguageModelToolInvocationOptions, token: CancellationToken): Thenable; + } + + /** + * A tool that is available to the language model via {@link LanguageModelChatRequestOptions}. A language model uses all the + * properties of this interface to decide which tool to call, and how to call it. + */ + export interface LanguageModelChatTool { + /** + * The name of the tool. + */ + name: string; + + /** + * The description of the tool. + */ + description: string; + + /** + * A JSON schema for the parameters this tool accepts. + */ + parametersSchema?: object; + } + + /** + * A tool-calling mode for the language model to use. + */ + export enum LanguageModelChatToolMode { + /** + * The language model can choose to call a tool or generate a message. Is the default. + */ + Auto = 1, + + /** + * The language model must call one of the provided tools. Note- some models only support a single tool when using this + * mode. TODO@API - do we throw, or just pick the first tool? Or only offer an API that allows callers to pick a single + * tool? Go back to `toolChoice?: string`? + */ + Required = 2 + } + + export interface LanguageModelChatRequestOptions { + + /** + * An optional list of tools that are available to the language model. These could be registered tools available via + * {@link lm.tools}, or private tools that are just implemented within the calling extension. + * + * If the LLM requests to call one of these tools, it will return a {@link LanguageModelToolCallPart} in + * {@link LanguageModelChatResponse.stream}. It's the caller's responsibility to invoke the tool. If it's a tool + * registered in {@link lm.tools}, that means calling {@link lm.invokeTool}. + * + * Then, the tool result can be provided to the LLM by creating an Assistant-type {@link LanguageModelChatMessage} with a + * {@link LanguageModelToolCallPart}, followed by a User-type message with a {@link LanguageModelToolResultPart}. + */ + tools?: LanguageModelChatTool[]; + + /** + * The tool-selecting mode to use. {@link LanguageModelChatToolMode.Auto} by default. + */ + toolMode?: LanguageModelChatToolMode; + } + + /** + * A language model response part indicating a tool call, returned from a {@link LanguageModelChatResponse}, and also can be + * included as a content part on a {@link LanguageModelChatMessage}, to represent a previous tool call in a chat request. + */ + export class LanguageModelToolCallPart { + /** + * The name of the tool to call. + */ + name: string; + + /** + * The ID of the tool call. This is a unique identifier for the tool call within the chat request. + */ + callId: string; + + /** + * The parameters with which to call the tool. + */ + parameters: object; + + /** + * Create a new LanguageModelToolCallPart. + */ + constructor(name: string, callId: string, parameters: object); + } + + /** + * A language model response part containing a piece of text, returned from a {@link LanguageModelChatResponse}. + */ + export class LanguageModelTextPart { + /** + * The text content of the part. + */ + value: string; + + /** + * Construct a text part with the given content. + * @param value The text content of the part. 
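// --- Illustrative aside (not part of the checked-in file): a minimal sketch of the
// caller-side protocol these types describe, assuming a LanguageModelChat instance
// obtained elsewhere (for example via vscode.lm.selectChatModels). The prompt text
// is invented for the sketch.
import * as vscode from 'vscode';

async function chatWithTools(model: vscode.LanguageModelChat, token: vscode.CancellationToken): Promise<void> {
    const options: vscode.LanguageModelChatRequestOptions = {
        // Advertise every registered tool; private in-extension tools could be listed too.
        tools: vscode.lm.tools.map(tool => ({
            name: tool.name,
            description: tool.description,
            parametersSchema: tool.parametersSchema
        })),
        toolMode: vscode.LanguageModelChatToolMode.Auto
    };
    const response = await model.sendRequest(
        [vscode.LanguageModelChatMessage.User('What is the active C++ configuration?')],
        options,
        token);
    for await (const part of response.stream) {
        if (part instanceof vscode.LanguageModelTextPart) {
            console.log(part.value); // assistant text to surface to the user
        } else if (part instanceof vscode.LanguageModelToolCallPart) {
            // The model requested a tool run; invoking it is the caller's responsibility.
            const result = await vscode.lm.invokeTool(
                part.name,
                { toolInvocationToken: undefined, parameters: part.parameters },
                token);
            console.log(`${part.name} returned ${result.content.length} result part(s)`);
        }
    }
}
// --- End aside.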
+ */ + constructor(value: string); + } + + /** + * A language model response part containing a PromptElementJSON from `@vscode/prompt-tsx`. + * @see {@link LanguageModelToolResult} + */ + export class LanguageModelPromptTsxPart { + /** + * The value of the part. + */ + value: unknown; + + /** + * The mimeType of this part, exported from the `@vscode/prompt-tsx` library. + */ + mime: string; + + /** + * Construct a prompt-tsx part with the given content. + * @param value The value of the part, the result of `renderPromptElementJSON` from `@vscode/prompt-tsx`. + * @param mime The mimeType of the part, exported from `@vscode/prompt-tsx` as `contentType`. + */ + constructor(value: unknown, mime: string); + } + + export interface LanguageModelChatResponse { + /** + * A stream of parts that make up the response. Could be extended with more types in the future. A + * {@link LanguageModelTextPart} is part of the assistant's response to be shown to the user. A + * {@link LanguageModelToolCallPart} is a request from the language model to call a tool. + */ + stream: AsyncIterable; + } + + /** + * The result of a tool call. Can only be included in the content of a User message. + */ + export class LanguageModelToolResultPart { + /** + * The ID of the tool call. + */ + callId: string; + + /** + * The value of the tool result. + */ + content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]; + + /** + * @param callId The ID of the tool call. + * @param content The content of the tool result. + */ + constructor(callId: string, content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]); + } + + export interface LanguageModelChatMessage { + /** + * A heterogeneous array of other things that a message can contain as content. Some parts may be message-type specific + * for some models. + */ + content2: (string | LanguageModelToolResultPart | LanguageModelToolCallPart)[]; + } + + /** + * A result returned from a tool invocation. If using `@vscode/prompt-tsx`, this result may be rendered using a `ToolResult`. + */ + export class LanguageModelToolResult { + /** + * A list of tool result content parts. Includes `unknown` becauses this list may be extended with new content types in + * the future. + * @see {@link lm.invokeTool}. + */ + content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]; + + /** + * Create a LanguageModelToolResult + * @param content A list of tool result content parts + */ + constructor(content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]); + } + + /** + * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request. + */ + export type ChatParticipantToolToken = unknown; + + /** + * Options provided for tool invocation. + */ + export interface LanguageModelToolInvocationOptions { + /** + * When this tool is being invoked by a {@link ChatParticipant} within the context of a chat request, this token should be + * passed from {@link ChatRequest.toolInvocationToken}. In that case, a progress bar will be automatically shown for the + * tool invocation in the chat response view, and if the tool requires user confirmation, it will show up inline in the + * chat view. If the tool is being invoked outside of a chat request, `undefined` should be passed instead. + * + * If a tool invokes another tool during its invocation, it can pass along the `toolInvocationToken` that it received. 
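// --- Illustrative aside (not part of the checked-in file): a minimal tool
// implementation against these invocation options. The parameter shape, the tool
// name, and the halving-based truncation strategy are assumptions for the sketch.
import * as vscode from 'vscode';

interface EchoParams { text: string }

const echoTool: vscode.LanguageModelTool<EchoParams> = {
    async invoke(options, token) {
        let text = options.parameters.text;
        const tokenization = options.tokenizationOptions;
        if (tokenization) {
            // Honor the caller's token budget using the model-specific tokenizer.
            while (await tokenization.countTokens(text, token) > tokenization.tokenBudget) {
                text = text.slice(0, Math.floor(text.length / 2));
            }
        }
        return new vscode.LanguageModelToolResult([new vscode.LanguageModelTextPart(text)]);
    }
};

// The name must match a `languageModelTools` contribution in package.json.
const registration: vscode.Disposable = vscode.lm.registerTool('echo', echoTool);
// --- End aside.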
+ */ + toolInvocationToken: ChatParticipantToolToken | undefined; + + /** + * The parameters with which to invoke the tool. The parameters must match the schema defined in + * {@link LanguageModelToolInformation.parametersSchema} + */ + parameters: T; + + /** + * Options to hint at how many tokens the tool should return in its response, and enable the tool to count tokens + * accurately. + */ + tokenizationOptions?: LanguageModelToolTokenizationOptions; + } + + /** + * Options related to tokenization for a tool invocation. + */ + export interface LanguageModelToolTokenizationOptions { + /** + * If known, the maximum number of tokens the tool should emit in its result. + */ + tokenBudget: number; + + /** + * Count the number of tokens in a message using the model specific tokenizer-logic. + * @param text A string. + * @param token Optional cancellation token. See {@link CancellationTokenSource} for how to create one. + * @returns A thenable that resolves to the number of tokens. + */ + countTokens(text: string, token?: CancellationToken): Thenable; + } + + /** + * Information about a registered tool available in {@link lm.tools}. + */ + export interface LanguageModelToolInformation { + /** + * A unique name for the tool. + */ + readonly name: string; + + /** + * A description of this tool that may be passed to a language model. + */ + readonly description: string; + + /** + * A JSON schema for the parameters this tool accepts. + */ + readonly parametersSchema: object | undefined; + + /** + * A set of tags, declared by the tool, that roughly describe the tool's capabilities. A tool user may use these to filter + * the set of tools to just ones that are relevant for the task at hand. + */ + readonly tags: readonly string[]; + } + + /** + * When this is returned in {@link PreparedToolInvocation}, the user will be asked to confirm before running the tool. These + * messages will be shown with buttons that say "Continue" and "Cancel". + */ + export interface LanguageModelToolConfirmationMessages { + /** + * The title of the confirmation message. + */ + title: string; + + /** + * The body of the confirmation message. + */ + message: string | MarkdownString; + } + + /** + * Options for {@link LanguageModelTool.prepareInvocation}. + */ + export interface LanguageModelToolInvocationPrepareOptions { + /** + * The parameters that the tool is being invoked with. + */ + parameters: T; + } + + /** + * A tool that can be invoked by a call to a {@link LanguageModelChat}. + */ + export interface LanguageModelTool { + /** + * Invoke the tool with the given parameters and return a result. + * + * The provided {@link LanguageModelToolInvocationOptions.parameters} are currently not validated against the declared + * schema, but will be in the future. + */ + invoke(options: LanguageModelToolInvocationOptions, token: CancellationToken): ProviderResult; + + /** + * Called once before a tool is invoked. It's recommended to implement this to customize the progress message that appears + * while the tool is running, and to provide a more useful message with context from the invocation parameters. Can also + * signal that a tool needs user confirmation before running, if appropriate. Must be free of side-effects. A call to + * `prepareInvocation` is not necessarily followed by a call to `invoke`. + */ + prepareInvocation?(options: LanguageModelToolInvocationPrepareOptions, token: CancellationToken): ProviderResult; + } + + /** + * The result of a call to {@link LanguageModelTool.prepareInvocation}. 
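// --- Illustrative aside (not part of the checked-in file): pairing prepareInvocation
// with invoke so a side-effecting tool asks for confirmation first. The tool, its
// parameters, and the messages are hypothetical.
import * as vscode from 'vscode';

interface DeleteParams { path: string }

const deleteFileTool: vscode.LanguageModelTool<DeleteParams> = {
    prepareInvocation(options) {
        return {
            // Shown as the progress message while the tool runs.
            invocationMessage: `Deleting ${options.parameters.path}`,
            // Present because the tool is destructive; the user gets
            // Continue/Cancel buttons before invoke() is called.
            confirmationMessages: {
                title: 'Delete file?',
                message: new vscode.MarkdownString(`This will delete \`${options.parameters.path}\`.`)
            }
        };
    },
    async invoke(options, _token) {
        await vscode.workspace.fs.delete(vscode.Uri.file(options.parameters.path));
        return new vscode.LanguageModelToolResult([
            new vscode.LanguageModelTextPart(`Deleted ${options.parameters.path}`)
        ]);
    }
};
// --- End aside.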
+ */ + export interface PreparedToolInvocation { + /** + * A customized progress message to show while the tool runs. + */ + invocationMessage?: string; + + /** + * The presence of this property indicates that the user should be asked to confirm before running the tool. The user + * should be asked for confirmation for any tool that has a side-effect or may potentially be dangerous. + */ + confirmationMessages?: LanguageModelToolConfirmationMessages; + } + + /** + * A reference to a tool that the user manually attached to their request, either using the `#`-syntax inline, or as an + * attachment via the paperclip button. + */ + export interface ChatLanguageModelToolReference { + /** + * The tool name. Refers to a tool listed in {@link lm.tools}. + */ + readonly name: string; + + /** + * The start and end index of the reference in the {@link ChatRequest.prompt prompt}. When undefined, the reference was + * not part of the prompt text. + * + * *Note* that the indices take the leading `#`-character into account which means they can be used to modify the prompt + * as-is. + */ + readonly range?: [start: number, end: number]; + } + + export interface ChatRequest { + /** + * The list of tools that the user attached to their request. + * + * When a tool reference is present, the chat participant should make a chat request using + * {@link LanguageModelChatToolMode.Required} to force the language model to generate parameters for the tool. Then, the + * participant can use {@link lm.invokeTool} to use the tool attach the result to its request for the user's prompt. The + * tool may contribute useful extra context for the user's request. + */ + readonly toolReferences: readonly ChatLanguageModelToolReference[]; + + /** + * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request. + * This associates the tool invocation to a chat session. + */ + readonly toolInvocationToken: ChatParticipantToolToken; + } + + export interface ChatRequestTurn { + /** + * The list of tools were attached to this request. + */ + readonly toolReferences?: readonly ChatLanguageModelToolReference[]; + } +} From 1f7f2f77adb457ef2d986cd99c1c977d6b2f2cb9 Mon Sep 17 00:00:00 2001 From: Ben McMorran Date: Tue, 22 Oct 2024 09:08:31 -0700 Subject: [PATCH 5/6] Switch to stable lmTools API (#12874) * Revert "Check-in lmTools API to avoid build breaks (#12872)" This reverts commit a248d107da06b0d1fd3fc24cdd3172c3ce2c8c4d. * Switch to stable lmTools API --- Extension/.gitignore | 4 - Extension/package.json | 5 +- Extension/vscode.proposed.lmTools.d.ts | 407 ------------------------- 3 files changed, 2 insertions(+), 414 deletions(-) delete mode 100644 Extension/vscode.proposed.lmTools.d.ts diff --git a/Extension/.gitignore b/Extension/.gitignore index 06e718e788..1adad30d07 100644 --- a/Extension/.gitignore +++ b/Extension/.gitignore @@ -35,7 +35,3 @@ src/nativeStrings.ts vscode*.d.ts .scripts/_* - -# The lmTools API is still changing frequently. We want to avoid spontaneous -# build breaks just because the upstream API changed in VS Code Insiders. 
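// --- Illustrative aside (not part of these patches): a minimal sketch of the
// participant-side flow the ChatRequest documentation above describes, passing the
// request's toolInvocationToken through lm.invokeTool so the invocation is associated
// with the chat session. The empty parameters are a placeholder; a real participant
// would first run a LanguageModelChatToolMode.Required request to generate them.
import * as vscode from 'vscode';

const handler: vscode.ChatRequestHandler = async (request, _context, stream, token) => {
    for (const ref of request.toolReferences) {
        const info = vscode.lm.tools.find(tool => tool.name === ref.name);
        if (!info) { continue; }
        const result = await vscode.lm.invokeTool(
            info.name,
            { toolInvocationToken: request.toolInvocationToken, parameters: {} },
            token);
        // Surface any plain-text result parts in the chat response.
        for (const part of result.content) {
            if (part instanceof vscode.LanguageModelTextPart) {
                stream.markdown(part.value);
            }
        }
    }
    return {};
};
// --- End aside.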
-!vscode.proposed.lmTools.d.ts diff --git a/Extension/package.json b/Extension/package.json index bdc0895c0b..cad8ec768c 100644 --- a/Extension/package.json +++ b/Extension/package.json @@ -38,8 +38,7 @@ "Snippets" ], "enabledApiProposals": [ - "terminalDataWriteEvent", - "lmTools" + "terminalDataWriteEvent" ], "capabilities": { "untrustedWorkspaces": { @@ -6512,7 +6511,7 @@ "translations-generate": "set NODE_OPTIONS=--no-experimental-fetch && gulp translations-generate", "translations-import": "gulp translations-import", "import-edge-strings": "ts-node -T ./.scripts/import_edge_strings.ts", - "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main && git checkout -- vscode.proposed.lmTools.d.ts)", + "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main)", "build": "yarn prep:dts && echo [Building TypeScript code] && tsc --build tsconfig.json" }, "devDependencies": { diff --git a/Extension/vscode.proposed.lmTools.d.ts b/Extension/vscode.proposed.lmTools.d.ts deleted file mode 100644 index 2b40bd7b0b..0000000000 --- a/Extension/vscode.proposed.lmTools.d.ts +++ /dev/null @@ -1,407 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -// version: 10 -// https://github.com/microsoft/vscode/issues/213274 - -declare module 'vscode' { - - export namespace lm { - /** - * Register a LanguageModelTool. The tool must also be registered in the package.json `languageModelTools` contribution - * point. A registered tool is available in the {@link lm.tools} list for any extension to see. But in order for it to - * be seen by a language model, it must be passed in the list of available tools in {@link LanguageModelChatRequestOptions.tools}. - */ - export function registerTool(name: string, tool: LanguageModelTool): Disposable; - - /** - * A list of all available tools that were registered by all extensions using {@link lm.registerTool}. They can be called - * with {@link lm.invokeTool} with a set of parameters that match their declared `parametersSchema`. - */ - export const tools: readonly LanguageModelToolInformation[]; - - /** - * Invoke a tool listed in {@link lm.tools} by name with the given parameters. - * - * The caller must pass a {@link LanguageModelToolInvocationOptions.toolInvocationToken}, which comes from - * {@link ChatRequest.toolInvocationToken} when the tool is being invoked by a by a {@link ChatParticipant}, and - * associates the invocation to a chat session. - * - * The tool will return a {@link LanguageModelToolResult} which contains an array of {@link LanguageModelTextPart} and - * {@link LanguageModelPromptTsxPart}. If the tool caller is using `@vscode/prompt-tsx`, it can incorporate the response - * parts into its prompt using a `ToolResult`. If not, the parts can be passed along to the {@link LanguageModelChat} via - * a User message with a {@link LanguageModelToolResultPart}. - * - * If a chat participant wants to preserve tool results for requests across multiple turns, it can store tool results in - * the {@link ChatResult.metadata} returned from the handler and retrieve them on the next turn from - * {@link ChatResponseTurn.result}. 
- */ - export function invokeTool(name: string, options: LanguageModelToolInvocationOptions, token: CancellationToken): Thenable; - } - - /** - * A tool that is available to the language model via {@link LanguageModelChatRequestOptions}. A language model uses all the - * properties of this interface to decide which tool to call, and how to call it. - */ - export interface LanguageModelChatTool { - /** - * The name of the tool. - */ - name: string; - - /** - * The description of the tool. - */ - description: string; - - /** - * A JSON schema for the parameters this tool accepts. - */ - parametersSchema?: object; - } - - /** - * A tool-calling mode for the language model to use. - */ - export enum LanguageModelChatToolMode { - /** - * The language model can choose to call a tool or generate a message. Is the default. - */ - Auto = 1, - - /** - * The language model must call one of the provided tools. Note- some models only support a single tool when using this - * mode. TODO@API - do we throw, or just pick the first tool? Or only offer an API that allows callers to pick a single - * tool? Go back to `toolChoice?: string`? - */ - Required = 2 - } - - export interface LanguageModelChatRequestOptions { - - /** - * An optional list of tools that are available to the language model. These could be registered tools available via - * {@link lm.tools}, or private tools that are just implemented within the calling extension. - * - * If the LLM requests to call one of these tools, it will return a {@link LanguageModelToolCallPart} in - * {@link LanguageModelChatResponse.stream}. It's the caller's responsibility to invoke the tool. If it's a tool - * registered in {@link lm.tools}, that means calling {@link lm.invokeTool}. - * - * Then, the tool result can be provided to the LLM by creating an Assistant-type {@link LanguageModelChatMessage} with a - * {@link LanguageModelToolCallPart}, followed by a User-type message with a {@link LanguageModelToolResultPart}. - */ - tools?: LanguageModelChatTool[]; - - /** - * The tool-selecting mode to use. {@link LanguageModelChatToolMode.Auto} by default. - */ - toolMode?: LanguageModelChatToolMode; - } - - /** - * A language model response part indicating a tool call, returned from a {@link LanguageModelChatResponse}, and also can be - * included as a content part on a {@link LanguageModelChatMessage}, to represent a previous tool call in a chat request. - */ - export class LanguageModelToolCallPart { - /** - * The name of the tool to call. - */ - name: string; - - /** - * The ID of the tool call. This is a unique identifier for the tool call within the chat request. - */ - callId: string; - - /** - * The parameters with which to call the tool. - */ - parameters: object; - - /** - * Create a new LanguageModelToolCallPart. - */ - constructor(name: string, callId: string, parameters: object); - } - - /** - * A language model response part containing a piece of text, returned from a {@link LanguageModelChatResponse}. - */ - export class LanguageModelTextPart { - /** - * The text content of the part. - */ - value: string; - - /** - * Construct a text part with the given content. - * @param value The text content of the part. - */ - constructor(value: string); - } - - /** - * A language model response part containing a PromptElementJSON from `@vscode/prompt-tsx`. - * @see {@link LanguageModelToolResult} - */ - export class LanguageModelPromptTsxPart { - /** - * The value of the part. 
- */ - value: unknown; - - /** - * The mimeType of this part, exported from the `@vscode/prompt-tsx` library. - */ - mime: string; - - /** - * Construct a prompt-tsx part with the given content. - * @param value The value of the part, the result of `renderPromptElementJSON` from `@vscode/prompt-tsx`. - * @param mime The mimeType of the part, exported from `@vscode/prompt-tsx` as `contentType`. - */ - constructor(value: unknown, mime: string); - } - - export interface LanguageModelChatResponse { - /** - * A stream of parts that make up the response. Could be extended with more types in the future. A - * {@link LanguageModelTextPart} is part of the assistant's response to be shown to the user. A - * {@link LanguageModelToolCallPart} is a request from the language model to call a tool. - */ - stream: AsyncIterable; - } - - /** - * The result of a tool call. Can only be included in the content of a User message. - */ - export class LanguageModelToolResultPart { - /** - * The ID of the tool call. - */ - callId: string; - - /** - * The value of the tool result. - */ - content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]; - - /** - * @param callId The ID of the tool call. - * @param content The content of the tool result. - */ - constructor(callId: string, content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]); - } - - export interface LanguageModelChatMessage { - /** - * A heterogeneous array of other things that a message can contain as content. Some parts may be message-type specific - * for some models. - */ - content2: (string | LanguageModelToolResultPart | LanguageModelToolCallPart)[]; - } - - /** - * A result returned from a tool invocation. If using `@vscode/prompt-tsx`, this result may be rendered using a `ToolResult`. - */ - export class LanguageModelToolResult { - /** - * A list of tool result content parts. Includes `unknown` becauses this list may be extended with new content types in - * the future. - * @see {@link lm.invokeTool}. - */ - content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]; - - /** - * Create a LanguageModelToolResult - * @param content A list of tool result content parts - */ - constructor(content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]); - } - - /** - * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request. - */ - export type ChatParticipantToolToken = unknown; - - /** - * Options provided for tool invocation. - */ - export interface LanguageModelToolInvocationOptions { - /** - * When this tool is being invoked by a {@link ChatParticipant} within the context of a chat request, this token should be - * passed from {@link ChatRequest.toolInvocationToken}. In that case, a progress bar will be automatically shown for the - * tool invocation in the chat response view, and if the tool requires user confirmation, it will show up inline in the - * chat view. If the tool is being invoked outside of a chat request, `undefined` should be passed instead. - * - * If a tool invokes another tool during its invocation, it can pass along the `toolInvocationToken` that it received. - */ - toolInvocationToken: ChatParticipantToolToken | undefined; - - /** - * The parameters with which to invoke the tool. 
The parameters must match the schema defined in - * {@link LanguageModelToolInformation.parametersSchema} - */ - parameters: T; - - /** - * Options to hint at how many tokens the tool should return in its response, and enable the tool to count tokens - * accurately. - */ - tokenizationOptions?: LanguageModelToolTokenizationOptions; - } - - /** - * Options related to tokenization for a tool invocation. - */ - export interface LanguageModelToolTokenizationOptions { - /** - * If known, the maximum number of tokens the tool should emit in its result. - */ - tokenBudget: number; - - /** - * Count the number of tokens in a message using the model specific tokenizer-logic. - * @param text A string. - * @param token Optional cancellation token. See {@link CancellationTokenSource} for how to create one. - * @returns A thenable that resolves to the number of tokens. - */ - countTokens(text: string, token?: CancellationToken): Thenable; - } - - /** - * Information about a registered tool available in {@link lm.tools}. - */ - export interface LanguageModelToolInformation { - /** - * A unique name for the tool. - */ - readonly name: string; - - /** - * A description of this tool that may be passed to a language model. - */ - readonly description: string; - - /** - * A JSON schema for the parameters this tool accepts. - */ - readonly parametersSchema: object | undefined; - - /** - * A set of tags, declared by the tool, that roughly describe the tool's capabilities. A tool user may use these to filter - * the set of tools to just ones that are relevant for the task at hand. - */ - readonly tags: readonly string[]; - } - - /** - * When this is returned in {@link PreparedToolInvocation}, the user will be asked to confirm before running the tool. These - * messages will be shown with buttons that say "Continue" and "Cancel". - */ - export interface LanguageModelToolConfirmationMessages { - /** - * The title of the confirmation message. - */ - title: string; - - /** - * The body of the confirmation message. - */ - message: string | MarkdownString; - } - - /** - * Options for {@link LanguageModelTool.prepareInvocation}. - */ - export interface LanguageModelToolInvocationPrepareOptions { - /** - * The parameters that the tool is being invoked with. - */ - parameters: T; - } - - /** - * A tool that can be invoked by a call to a {@link LanguageModelChat}. - */ - export interface LanguageModelTool { - /** - * Invoke the tool with the given parameters and return a result. - * - * The provided {@link LanguageModelToolInvocationOptions.parameters} are currently not validated against the declared - * schema, but will be in the future. - */ - invoke(options: LanguageModelToolInvocationOptions, token: CancellationToken): ProviderResult; - - /** - * Called once before a tool is invoked. It's recommended to implement this to customize the progress message that appears - * while the tool is running, and to provide a more useful message with context from the invocation parameters. Can also - * signal that a tool needs user confirmation before running, if appropriate. Must be free of side-effects. A call to - * `prepareInvocation` is not necessarily followed by a call to `invoke`. - */ - prepareInvocation?(options: LanguageModelToolInvocationPrepareOptions, token: CancellationToken): ProviderResult; - } - - /** - * The result of a call to {@link LanguageModelTool.prepareInvocation}. - */ - export interface PreparedToolInvocation { - /** - * A customized progress message to show while the tool runs. 
- */ - invocationMessage?: string; - - /** - * The presence of this property indicates that the user should be asked to confirm before running the tool. The user - * should be asked for confirmation for any tool that has a side-effect or may potentially be dangerous. - */ - confirmationMessages?: LanguageModelToolConfirmationMessages; - } - - /** - * A reference to a tool that the user manually attached to their request, either using the `#`-syntax inline, or as an - * attachment via the paperclip button. - */ - export interface ChatLanguageModelToolReference { - /** - * The tool name. Refers to a tool listed in {@link lm.tools}. - */ - readonly name: string; - - /** - * The start and end index of the reference in the {@link ChatRequest.prompt prompt}. When undefined, the reference was - * not part of the prompt text. - * - * *Note* that the indices take the leading `#`-character into account which means they can be used to modify the prompt - * as-is. - */ - readonly range?: [start: number, end: number]; - } - - export interface ChatRequest { - /** - * The list of tools that the user attached to their request. - * - * When a tool reference is present, the chat participant should make a chat request using - * {@link LanguageModelChatToolMode.Required} to force the language model to generate parameters for the tool. Then, the - * participant can use {@link lm.invokeTool} to use the tool attach the result to its request for the user's prompt. The - * tool may contribute useful extra context for the user's request. - */ - readonly toolReferences: readonly ChatLanguageModelToolReference[]; - - /** - * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request. - * This associates the tool invocation to a chat session. - */ - readonly toolInvocationToken: ChatParticipantToolToken; - } - - export interface ChatRequestTurn { - /** - * The list of tools were attached to this request. - */ - readonly toolReferences?: readonly ChatLanguageModelToolReference[]; - } -} From e58e96369f7bc3e0fb6dfb95a689cd107cf0fc2b Mon Sep 17 00:00:00 2001 From: Colen Garoutte-Carson <49173979+Colengms@users.noreply.github.com> Date: Tue, 22 Oct 2024 15:31:38 -0700 Subject: [PATCH 6/6] Prevent redundant progressive squiggle updates (#12876) --- Extension/src/LanguageServer/client.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Extension/src/LanguageServer/client.ts b/Extension/src/LanguageServer/client.ts index 04dcaba701..7d4d81d5ad 100644 --- a/Extension/src/LanguageServer/client.ts +++ b/Extension/src/LanguageServer/client.ts @@ -2387,7 +2387,9 @@ export class DefaultClient implements Client { } this.updateInactiveRegions(intelliSenseResult.uri, intelliSenseResult.inactiveRegions, intelliSenseResult.clearExistingInactiveRegions, intelliSenseResult.isCompletePass); - this.updateSquiggles(intelliSenseResult.uri, intelliSenseResult.diagnostics, intelliSenseResult.clearExistingDiagnostics); + if (intelliSenseResult.clearExistingDiagnostics || intelliSenseResult.diagnostics.length > 0) { + this.updateSquiggles(intelliSenseResult.uri, intelliSenseResult.diagnostics, intelliSenseResult.clearExistingDiagnostics); + } } private updateSquiggles(uriString: string, diagnostics: IntelliSenseDiagnostic[], startNewSet: boolean): void {
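
An illustrative aside on patch 6: a minimal sketch of the update flow its guard short-circuits, assuming updateSquiggles rebuilds the diagnostic set for a file whenever startNewSet is true (the types and helper below are invented for the sketch, not taken from the extension):

interface IntelliSenseUpdate {
    uri: string;
    diagnostics: string[];
    clearExistingDiagnostics: boolean;
}

function applySquiggleUpdate(collection: Map<string, string[]>, update: IntelliSenseUpdate): void {
    // Mirrors the patched condition: a progressive pass that neither clears the
    // existing set nor adds diagnostics would rebuild identical squiggles, so skip it.
    if (!update.clearExistingDiagnostics && update.diagnostics.length === 0) {
        return;
    }
    const existing = update.clearExistingDiagnostics ? [] : collection.get(update.uri) ?? [];
    collection.set(update.uri, [...existing, ...update.diagnostics]);
}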