Merge branch 'main' into pe/oneper-pt-2

Patrick Erichsen committed on 2025-07-29 09:46:59 -07:00 via GitHub
commit 9e7eeccace
32 changed files with 538 additions and 141 deletions

View File

@ -1,10 +0,0 @@
# Reference: https://github.com/marketplace/actions/auto-assign-action
addAssignees: true
assignees:
- sestinj
- Patrick-Erichsen
- tomasz-stefaniak
- RomneyDa
numberOfAssignees: 2

View File

@ -1,10 +0,0 @@
name: PR assignment
on:
pull_request:
types: [opened, ready_for_review]
jobs:
add-reviews:
runs-on: ubuntu-latest
steps:
- uses: kentaro-m/auto-assign-action@v2.0.0

View File

@ -19,7 +19,6 @@ export const sharedConfigSchema = z
readResponseTTS: z.boolean(),
promptPath: z.string(),
useCurrentFileAsContext: z.boolean(),
optInNextEditFeature: z.boolean(),
enableExperimentalTools: z.boolean(),
onlyUseSystemMessageTools: z.boolean(),
codebaseToolCallingOnly: z.boolean(),
@ -182,10 +181,7 @@ export function modifyAnyConfigWithSharedConfig<
configCopy.experimental.useCurrentFileAsContext =
sharedConfig.useCurrentFileAsContext;
}
if (sharedConfig.optInNextEditFeature !== undefined) {
configCopy.experimental.optInNextEditFeature =
sharedConfig.optInNextEditFeature;
}
if (sharedConfig.onlyUseSystemMessageTools !== undefined) {
configCopy.experimental.onlyUseSystemMessageTools =
sharedConfig.onlyUseSystemMessageTools;

View File

@ -0,0 +1,31 @@
import { ModelDescription } from "../index.d";
/**
* Determines if system message tools should be auto-enabled for the given model.
* This is used to automatically enable system message tools for certain provider/model
* combinations where we have a strong preference, taking priority over the user's manual setting.
*
* System message tools are an alternative to native tool calling where tool definitions and calls
* are embedded in the system message as text rather than using the provider's native tool calling API.
* This can be beneficial for providers/models that don't support native tool calling or have
* poor tool calling performance.
*
* Current auto-enable rules:
* - OpenRouter provider: true for all models except those containing "claude" (since Claude models
* generally have good native tool calling support)
* - All other providers: undefined (no auto-preference, use manual setting)
*
* @param model The model description to check
* @returns true to force enable, false to force disable, undefined for no auto-preference
*/
export function shouldAutoEnableSystemMessageTools(
model: ModelDescription,
): boolean | undefined {
// Auto-enable for OpenRouter, but exclude Claude models which have good native tool calling
if (model.provider === "openrouter") {
return !model.model.toLowerCase().includes("claude");
}
// No auto-preference for all other providers - use manual setting
return undefined;
}
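
Since the return value is a tri-state, callers are expected to fold it together with the user's manual setting. A minimal sketch of that pattern (the `resolveSystemMessageTools` helper and its `userSetting` parameter are hypothetical; the Redux selector added later in this commit implements the same fallback chain):

```ts
import { ModelDescription } from "../index.d";
import { shouldAutoEnableSystemMessageTools } from "./shouldAutoEnableSystemMessageTools";

// Auto-detection wins whenever it has an opinion (true or false);
// otherwise fall back to the manual setting, defaulting to false.
function resolveSystemMessageTools(
  model: ModelDescription,
  userSetting?: boolean,
): boolean {
  return shouldAutoEnableSystemMessageTools(model) ?? userSetting ?? false;
}
```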

View File

@ -0,0 +1,85 @@
import { describe, expect, test } from "vitest";
import { ModelDescription } from "../index.d";
import { shouldAutoEnableSystemMessageTools } from "./shouldAutoEnableSystemMessageTools";
describe("shouldAutoEnableSystemMessageTools", () => {
const createModel = (provider: string, model: string): ModelDescription => ({
title: "Test Model",
provider,
underlyingProviderName: provider,
model,
});
test("should return true for OpenRouter models that don't contain 'claude'", () => {
expect(
shouldAutoEnableSystemMessageTools(createModel("openrouter", "gpt-4o")),
).toBe(true);
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "llama-3.3-70b-instruct"),
),
).toBe(true);
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "qwen/qwen-2.5-72b-instruct"),
),
).toBe(true);
});
test("should return false for OpenRouter Claude models", () => {
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "anthropic/claude-3-sonnet"),
),
).toBe(false);
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "anthropic/claude-3.5-sonnet"),
),
).toBe(false);
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "claude-3-haiku"),
),
).toBe(false);
});
test("should return undefined for all other providers", () => {
expect(
shouldAutoEnableSystemMessageTools(createModel("openai", "gpt-4o")),
).toBe(undefined);
expect(
shouldAutoEnableSystemMessageTools(
createModel("anthropic", "claude-3.5-sonnet"),
),
).toBe(undefined);
expect(
shouldAutoEnableSystemMessageTools(
createModel("groq", "llama-3.3-70b-versatile"),
),
).toBe(undefined);
expect(
shouldAutoEnableSystemMessageTools(
createModel("together", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"),
),
).toBe(undefined);
});
test("should be case insensitive for 'claude' detection", () => {
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "CLAUDE-models"),
),
).toBe(false);
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "Claude-3.5"),
),
).toBe(false);
expect(
shouldAutoEnableSystemMessageTools(
createModel("openrouter", "some-model-Claude"),
),
).toBe(false);
});
});

View File

@ -410,6 +410,10 @@ export class Core {
await this.messenger.request("openUrl", url);
});
on("controlPlane/getEnvironment", async (msg) => {
return await getControlPlaneEnv(this.ide.getIdeSettings());
});
on("controlPlane/getFreeTrialStatus", async (msg) => {
return this.configHandler.controlPlaneClient.getFreeTrialStatus();
});

core/index.d.ts
View File

@ -1541,11 +1541,6 @@ export interface ExperimentalConfig {
*/
useCurrentFileAsContext?: boolean;
/**
* If enabled, will enable next edit in place of autocomplete
*/
optInNextEditFeature?: boolean;
/**
* If enabled, @codebase will only use tool calling
* instead of embeddings, FTS, recently edited files, etc.

View File

@ -213,7 +213,7 @@ describe("LLM", () => {
testToolCall: true,
});
testLLM(
new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "o1-preview" }),
new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "o3-mini" }),
{ skip: false, timeout: 60000 },
);
testLLM(new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "o1" }), {

View File

@ -275,7 +275,7 @@ describe("OpenAI", () => {
test("should handle O1 models correctly", async () => {
const openai = new OpenAI({
apiKey: "test-api-key",
model: "o1-preview",
model: "o3-mini",
apiBase: "https://api.openai.com/v1/",
});
@ -296,7 +296,7 @@ describe("OpenAI", () => {
"api-key": "test-api-key",
},
body: {
model: "o1-preview",
model: "o3-mini",
messages: [{ role: "user", content: "hello" }],
stream: true,
max_completion_tokens: 100,

core/nextEdit/utils.ts
View File

@ -0,0 +1,15 @@
type NextEditModelName = "mercury-coder-nextedit";
export function isModelCapableOfNextEdit(modelName: string): boolean {
// In test mode, we can control whether next edit is enabled via an environment variable.
if (process.env.NEXT_EDIT_TEST_ENABLED === "false") {
return false;
}
if (process.env.NEXT_EDIT_TEST_ENABLED === "true") {
return true;
}
const supportedModels: NextEditModelName[] = ["mercury-coder-nextedit"];
return supportedModels.some((supported) => modelName.includes(supported));
}
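
A quick sketch of how this gate behaves (the calls below are illustrative, not part of the commit):

```ts
import { isModelCapableOfNextEdit } from "core/nextEdit/utils";

// Allowlist match: any model name containing a supported id passes.
isModelCapableOfNextEdit("mercury-coder-nextedit"); // true
isModelCapableOfNextEdit("gpt-4o"); // false

// E2E tests override the decision wholesale via the environment variable.
process.env.NEXT_EDIT_TEST_ENABLED = "true";
isModelCapableOfNextEdit("gpt-4o"); // now true
```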

View File

@ -43,7 +43,10 @@ import { AutocompleteCodeSnippet } from "../autocomplete/snippets/types";
import { GetLspDefinitionsFunction } from "../autocomplete/types";
import { ConfigHandler } from "../config/ConfigHandler";
import { SerializedOrgWithProfiles } from "../config/ProfileLifecycleManager";
import { ControlPlaneSessionInfo } from "../control-plane/AuthTypes";
import {
ControlPlaneEnv,
ControlPlaneSessionInfo,
} from "../control-plane/AuthTypes";
import { FreeTrialStatus } from "../control-plane/client";
export enum OnboardingModes {
@ -248,6 +251,7 @@ export type ToCoreFromIdeOrWebviewProtocol = {
];
"clipboardCache/add": [{ content: string }, void];
"controlPlane/openUrl": [{ path: string; orgSlug?: string }, void];
"controlPlane/getEnvironment": [undefined, ControlPlaneEnv];
"controlPlane/getFreeTrialStatus": [undefined, FreeTrialStatus | null];
"controlPlane/getModelsAddOnUpgradeUrl": [
{ vsCodeUriScheme?: string },

View File

@ -66,12 +66,13 @@ export const WEBVIEW_TO_CORE_PASS_THROUGH: (keyof ToCoreFromWebviewProtocol)[] =
"didChangeSelectedProfile",
"didChangeSelectedOrg",
"tools/call",
"controlPlane/openUrl",
"controlPlane/getEnvironment",
"controlPlane/getFreeTrialStatus",
"controlPlane/getModelsAddOnUpgradeUrl",
"controlPlane/openUrl",
"isItemTooBig",
"process/markAsBackgrounded",
"process/isBackgrounded",
"controlPlane/getFreeTrialStatus",
];
// Message types to pass through from core to webview

View File

@ -988,6 +988,14 @@
{
"source": "/agent/context-selection",
"destination": "/features/agent/context-selection"
},
{
"source": "/customization/overview#codebase-context",
"destination": "/customize/context/codebase"
},
{
"source": "/customization/overview#documentation-context",
"destination": "/customize/context/documentation"
}
]
}

View File

@ -51,8 +51,8 @@ View the exact prompt sent to the AI model in the [prompt logs](/troubleshooting
Learn more about how context providers work:
- [Codebase Context](/customization/overview#codebase-context)
- [Documentation Context](/customization/overview#documentation-context)
- [Codebase Context](/customize/context/codebase)
- [Documentation Context](/customize/context/documentation)
---

View File

@ -18,6 +18,20 @@ Its strong tool calling and reasoning capabilities make it the best model for Ag
These models have varying tool calling and reasoning capabilities.
[Kimi K2](https://hub.continue.dev/openrouter/kimi-k2) from Moonshot AI
1. Get your API key from [OpenRouter](https://openrouter.ai/settings/keys)
2. Add [Kimi K2](https://hub.continue.dev/openrouter/kimi-k2) to your assistant on Continue Hub
3. Add `OPENROUTER_API_KEY` as a [User Secret](https://docs.continue.dev/hub/secrets/secret-types#user-secrets) on Continue Hub [here](https://hub.continue.dev/settings/secrets)
4. Click `Reload config` in the assistant selector in the Continue IDE extension
[Qwen Coder 3 480B](https://hub.continue.dev/openrouter/qwen3-coder) from Qwen
1. Get your API key from [OpenRouter](https://openrouter.ai/settings/keys)
2. Add [Qwen Coder 3 480B](https://hub.continue.dev/openrouter/qwen3-coder) to your assistant on Continue Hub
3. Add `OPENROUTER_API_KEY` as a [User Secret](https://docs.continue.dev/hub/secrets/secret-types#user-secrets) on Continue Hub [here](https://hub.continue.dev/settings/secrets)
4. Click `Reload config` in the assistant selector in the Continue IDE extension
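
If you would rather configure the model by hand than through the Hub, a block along these lines should work in a local `config.yaml` (a sketch; the `moonshotai/kimi-k2` slug and the secret name are assumptions based on OpenRouter's catalog naming):

```yaml title="config.yaml"
models:
  - name: Kimi K2
    provider: openrouter
    model: moonshotai/kimi-k2
    apiKey: ${{ secrets.OPENROUTER_API_KEY }}
```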
[Gemini 2.5 Pro](https://hub.continue.dev/google/gemini-2.5-pro) from Google
1. Get your API key from [Google AI Studio](https://aistudio.google.com)
@ -25,13 +39,6 @@ These models have varying tool calling and reasoning capabilities.
3. Add `GEMINI_API_KEY` as a [User Secret](https://docs.continue.dev/hub/secrets/secret-types#user-secrets) on Continue Hub [here](https://hub.continue.dev/settings/secrets)
4. Click `Reload config` in the assistant selector in the Continue IDE extension
[Kimi K2](hub.continue.dev/togetherai/kimi-k2-instruct) from Moonshot AI
1. Get your API key from [Together AI](https://api.together.ai)
2. Add [Kimi K2](hub.continue.dev/togetherai/kimi-k2-instruct) to your assistant on Continue Hub
3. Add `TOGETHER_API_KEY` as a [User Secret](https://docs.continue.dev/hub/secrets/secret-types#user-secrets) on Continue Hub [here](https://hub.continue.dev/settings/secrets)
4. Click `Reload config` in the assistant selector in the Continue IDE extension
[o3](https://hub.continue.dev/openai/o3) from OpenAI
1. Get your API key from [OpenAI](https://platform.openai.com)

View File

@ -79,9 +79,7 @@ Local blocks utilizing mustache notation for secrets (`${{ secrets.SECRET_NAME }
Blocks can be passed user inputs, including hub secrets and raw text values. To create a block that has an input, use mustache templating as follows:
Block config.yaml
```yaml
```yaml title="config.yaml"
name: myprofile/custom-model
models:
- name: My Favorite Model
@ -91,11 +89,9 @@ models:
temperature: ${{ inputs.TEMP }}
```
Which can then be imported like
Which can then be imported like this:
Assistant config.yaml
```yaml
```yaml title="config.yaml"
name: myprofile/custom-assistant
models:
- uses: myprofile/custom-model
@ -110,9 +106,7 @@ Note that hub secrets can be passed as inputs, using a similar mustache format:
Block properties can also be directly overridden using `override`. For example:
Assistant config.yaml
```yaml
```yaml title="config.yaml"
name: myprofile/custom-assistant
models:
- uses: myprofile/custom-model
@ -149,9 +143,7 @@ The top-level properties in the `config.yaml` configuration file are:
The `name` property specifies the name of your project or configuration.
config.yaml
```yaml
```yaml title="config.yaml"
name: MyProject
```
@ -230,11 +222,9 @@ The `models` section defines the language models used in your configuration. Mod
- `key`: Path to the client certificate key file.
- `passphrase`: Optional passphrase for the client certificate key file.
#### Example
**Example:**
config.yaml
```yaml
```yaml title="config.yaml"
models:
- name: GPT-4o
provider: openai
@ -279,9 +269,7 @@ More information about usage/params for each context provider can be found [here
**Example:**
config.yaml
```yaml
```yaml title="config.yaml"
context:
- provider: file
- provider: code
@ -310,9 +298,7 @@ Explicit rules can either be simple text or an object with the following propert
- `rule` (**required**): The text content of the rule
- `globs` (optional): When files are provided as context that match this glob pattern, the rule will be included. This can be either a single pattern (e.g., `"**/*.{ts,tsx}"`) or an array of patterns (e.g., `["src/**/*.ts", "tests/**/*.ts"]`).
config.yaml
```yaml
```yaml title="config.yaml"
rules:
- Always annotate Python functions with their parameter and return types
- name: TypeScript best practices
@ -334,9 +320,7 @@ rules:
A list of custom prompts that can be invoked from the chat window. Each prompt has a name, description, and the actual prompt text.
config.yaml
```yaml
```yaml title="config.yaml"
prompts:
- name: check
description: Check for mistakes in my code
@ -360,11 +344,9 @@ List of documentation sites to index.
- `favicon`: URL for site favicon (default is `/favicon.ico` from `startUrl`).
- `useLocalCrawling`: Skip the default crawler and only crawl using a local crawler.
Example
**Example:**
config.yaml
```yaml
```yaml title="config.yaml"
docs:
- name: Continue
startUrl: https://docs.continue.dev/intro
@ -388,9 +370,7 @@ The [Model Context Protocol](https://modelcontextprotocol.io/introduction) is a
**Example:**
config.yaml
```yaml
```yaml title="config.yaml"
mcpServers:
- name: My MCP Server
command: uvx
@ -410,27 +390,18 @@ Destinations to which [development data](/customize/overview#development-data) w
**Properties:**
- `name` (**required**): The display name of the data destination
- `destination` (**required**): The destination/endpoint that will receive the data. Can be:
- an HTTP endpoint that will receive a POST request with a JSON blob
- a file URL to a directory in which events will be dumped to `.jsonl` files
- `schema` (**required**): the schema version of the JSON blobs to be sent. Options include `0.1.0` and `0.2.0`.
- `events`: an array of event names to include. Defaults to all events if not specified.
- `level`: a pre-defined filter for event fields. Options include `all` and `noCode`; the latter excludes data like file contents, prompts, and completions. Defaults to `all`
- `apiKey`: api key to be sent with request (Bearer header)
- `requestOptions`: Options for event POST requests. Same format as [model requestOptions](#models).
**Example:**
config.yaml
```yaml
```yaml title="config.yaml"
data:
- name: Local Data Bank
destination: file:///Users/dallin/Documents/code/continuedev/continue-extras/external-data
@ -451,9 +422,7 @@ data:
Putting it all together, here's a complete example of a `config.yaml` configuration file:
config.yaml
```yaml
```yaml title="config.yaml"
name: MyProject
version: 0.0.1
schema: v1
@ -527,9 +496,7 @@ data:
You can also use node anchors to avoid duplication of properties. To do so, you need to add the YAML version header `%YAML 1.1`. Here's an example of a `config.yaml` configuration file using anchors:
config.yaml
```yaml
```yaml title="config.yaml"
%YAML 1.1
---
name: MyProject
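# A minimal sketch of where the anchor pattern goes from here (illustrative
# property names; merge keys via `<<:` are a YAML 1.1 feature, which is why
# the version header above is required):
version: 0.0.1
schema: v1
model_defaults: &model_defaults
  provider: openai
  apiKey: ${{ secrets.OPENAI_API_KEY }}
models:
  - name: GPT-4o
    <<: *model_defaults
    model: gpt-4o
  - name: GPT-4o Mini
    <<: *model_defaults
    model: gpt-4o-mini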

View File

@ -18,7 +18,7 @@ export class NextEditActions {
);
// Check if HELLO is written into the editor.
const editorText = await editor.getTextAtLine(1);
const editorText = await editor.getTextAtLine(2);
return editorText === "HELLO";
}
@ -35,25 +35,35 @@ export class NextEditActions {
// Check if the editor text didn't change.
const editorText = await editor.getText();
return editorText === "def main():\n ";
return editorText === "def main():\n s";
}
/**
* Force a Next Edit suggestion using command.
*/
public static async forceNextEdit(editor: TextEditor): Promise<boolean> {
await editor.setText("def main():\n ");
console.log("Starting forceNextEdit...");
await editor.setText("def main():\n s");
console.log("Text set in editor");
await editor.moveCursor(2, 4);
console.log("Cursor moved to position 2, 4");
await new Workbench().executeCommand("Continue: Force Next Edit");
await TestUtils.waitForTimeout(DEFAULT_TIMEOUT.XL);
console.log("Executed 'Force Next Edit' command");
// console.log("Waiting for SVG decoration to appear...");
// await TestUtils.waitForTimeout(DEFAULT_TIMEOUT.MD);
// console.log("Wait completed, looking for decoration...");
console.log("Looking for decoration...");
const svgDecoration = await TestUtils.waitForSuccess(
() => NextEditSelectors.getSvgDecoration(VSBrowser.instance.driver),
DEFAULT_TIMEOUT.XL,
);
return svgDecoration !== null;
const result = svgDecoration !== null;
console.log("SVG decoration search result:", result);
return result;
}
public static async reload(): Promise<void> {

View File

@ -6,4 +6,5 @@ export const DEFAULT_TIMEOUT = {
MD: BASELINE,
XL: BASELINE * 5,
XXL: BASELINE * 7,
XXLP: BASELINE * 8,
};

View File

@ -1,19 +1,92 @@
import { By, WebDriver } from "vscode-extension-tester";
export class NextEditSelectors {
/**
* Get the SVG decoration element if present (for next edit).
*/
public static async getSvgDecoration(driver: WebDriver) {
try {
const decorations = await driver.findElement(
// By.xpath("//*[contains(@class, 'ced-') and matches(@class, 'ced-2-TextEditorDecorationType[0-9]+-4')]")
By.css("[class*='TextEditorDecorationType']"),
);
return decorations ?? null;
} catch (error) {
console.error("Error finding SVG decoration:", error);
return null;
}
console.log("===")
// return SelectorUtils.getElementByClassName(
// driver,
// "TextEditorDecorationType",
// );
return Promise.any(
[
await driver.findElement(
By.css("[class*='TextEditorDecorationType']")
),
await driver.findElement(
By.css("[class*=TextEditorDecorationType]")
),
await driver.findElement(
By.css("*[class*='TextEditorDecorationType']")
),
await driver.findElement(
By.css("*[class*=TextEditorDecorationType]")
),
await driver.findElement(
By.css("span[class*='TextEditorDecorationType']")
),
await driver.findElement(
By.css("span[class*=TextEditorDecorationType]")
),
await driver.findElement(
By.xpath(`//span[contains(@class, 'TextEditorDecorationType')]`)
),
await driver.findElement(
By.xpath(`//span[contains(@class, TextEditorDecorationType)]`)
),
await driver.findElement(
By.xpath(`//*[contains(@class, 'TextEditorDecorationType')]`)
),
await driver.findElement(
By.xpath(`//*[contains(@class, TextEditorDecorationType)]`)
),
]
)
// try {
// const decorations = await driver.findElement(
// // By.xpath("//*[contains(@class, 'ced-') and matches(@class, 'ced-2-TextEditorDecorationType[0-9]+-4')]")
// By.css("[class*='TextEditorDecorationType']"),
// // By.css("div[class*='TextEditorDecorationType'][style*='filter']")
// );
// if (!decorations) {
// throw new Error("SVG decoraton not found");
// }
// return decorations;
// } catch (error) {
// throw error;
// }
}
}

View File

@ -1,4 +1,4 @@
import { By, WebElement, WebView } from "vscode-extension-tester";
import { By, WebDriver, WebElement, WebView } from "vscode-extension-tester";
export class SelectorUtils {
/**
@ -26,4 +26,16 @@ export class SelectorUtils {
): Promise<WebElement> {
return view.findWebElement(By.css(`[aria-label='${ariaLabel}']`));
}
public static getElementByClassName(
driver: WebDriver,
className: string
): Promise<WebElement> {
return driver.findElement(By.css(`*[class*='${className}']`));
}
}

View File

@ -8,6 +8,10 @@ import { DEFAULT_TIMEOUT } from "../constants";
describe("Autocomplete", () => {
let editor: TextEditor;
before(async function () {
process.env.NEXT_EDIT_TEST_ENABLED = "false";
});
beforeEach(async function () {
this.timeout(DEFAULT_TIMEOUT.XL);

View File

@ -1,6 +1,6 @@
import * as fs from "fs/promises";
import { expect } from "chai";
import * as fs from "fs/promises";
import { EditorView, TextEditor } from "vscode-extension-tester";
import { GlobalActions } from "../actions/Global.actions";

View File

@ -0,0 +1,47 @@
import { expect } from "chai";
import { EditorView, TextEditor } from "vscode-extension-tester";
import { GlobalActions } from "../actions/Global.actions";
import { NextEditActions } from "../actions/NextEdit.actions";
import { DEFAULT_TIMEOUT } from "../constants";
describe("Next Edit", () => {
let editor: TextEditor;
before(async function () {
process.env.NEXT_EDIT_TEST_ENABLED = "true";
});
beforeEach(async function () {
this.timeout(DEFAULT_TIMEOUT.XL);
await GlobalActions.openTestWorkspace();
({ editor } = await GlobalActions.createAndOpenNewTextFile());
});
afterEach(async function () {
this.timeout(DEFAULT_TIMEOUT.XL);
await editor.clearText();
await new EditorView().closeAllEditors();
});
after(async function () {
process.env.NEXT_EDIT_TEST_ENABLED = "false";
});
it("Should force a Next Edit", async () => {
const hasDecoration = await NextEditActions.forceNextEdit(editor);
expect(hasDecoration).to.be.true;
}).timeout(DEFAULT_TIMEOUT.XL);
it("Should accept Next Edit suggestion with Tab", async () => {
const accepted = await NextEditActions.acceptNextEditSuggestion(editor);
expect(accepted).to.be.true;
}).timeout(DEFAULT_TIMEOUT.XL);
it("Should reject Next Edit suggestion with Esc", async () => {
const rejected = await NextEditActions.rejectNextEditSuggestion(editor);
expect(rejected).to.be.true;
}).timeout(DEFAULT_TIMEOUT.XL);
});

View File

@ -2,7 +2,7 @@
"name": "continue",
"icon": "media/icon.png",
"author": "Continue Dev, Inc",
"version": "1.1.69",
"version": "1.1.71",
"repository": {
"type": "git",
"url": "https://github.com/continuedev/continue"

View File

@ -42,6 +42,7 @@ import { VsCodeIde } from "../VsCodeIde";
import { ConfigYamlDocumentLinkProvider } from "./ConfigYamlDocumentLinkProvider";
import { VsCodeMessenger } from "./VsCodeMessenger";
import { isModelCapableOfNextEdit } from "core/nextEdit/utils";
import setupNextEditWindowManager, {
NextEditWindowManager,
} from "../activation/NextEditWindowManager";
@ -165,10 +166,13 @@ export class VsCodeExtension {
this.configHandler.onConfigUpdate(
async ({ config: newConfig, configLoadInterrupted }) => {
if (newConfig?.experimental?.optInNextEditFeature) {
const autocompleteModel = newConfig?.selectedModelByRole.autocomplete;
if (
autocompleteModel &&
isModelCapableOfNextEdit(autocompleteModel.model)
) {
// Set up next edit window manager when the autocomplete model supports next edit
await setupNextEditWindowManager(context);
this.activateNextEdit();
await NextEditWindowManager.freeTabAndEsc();
} else {

View File

@ -1,5 +1,5 @@
import { OnboardingModes } from "core/protocol/core";
import { useContext, useEffect } from "react";
import { useContext, useEffect, useState } from "react";
import { Outlet, useLocation, useNavigate } from "react-router-dom";
import styled from "styled-components";
import { CustomScrollbarDiv } from ".";
@ -42,12 +42,12 @@ const GridDiv = styled.div`
`;
const Layout = () => {
const [showStagingIndicator, setShowStagingIndicator] = useState(false);
const navigate = useNavigate();
const location = useLocation();
const dispatch = useAppDispatch();
const onboardingCard = useOnboardingCard();
const ideMessenger = useContext(IdeMessengerContext);
const currentSessionId = useAppSelector((state) => state.session.id);
const { mainEditor } = useMainEditor();
const dialogMessage = useAppSelector((state) => state.ui.dialogMessage);
@ -55,6 +55,17 @@ const Layout = () => {
const showDialog = useAppSelector((state) => state.ui.showDialog);
const isInEdit = useAppSelector((store) => store.session.isInEdit);
useEffect(() => {
(async () => {
const response = await ideMessenger.request(
"controlPlane/getEnvironment",
undefined,
);
response.status === "success" &&
setShowStagingIndicator(response.content.AUTH_TYPE.includes("staging"));
})();
}, []);
useWebviewListener(
"newSession",
async () => {
@ -236,6 +247,15 @@ const Layout = () => {
<LocalStorageProvider>
<AuthProvider>
<LayoutTopDiv>
{showStagingIndicator && (
<span
title="Staging environment"
className="absolute right-0 mx-1.5 h-1.5 w-1.5 rounded-full"
style={{
backgroundColor: "var(--vscode-list-warningForeground)",
}}
/>
)}
<LumpProvider>
<OSRContextMenu />
<div
@ -257,7 +277,7 @@ const Layout = () => {
message={dialogMessage}
/>
<GridDiv className="">
<GridDiv>
<PostHogPageView />
<Outlet />
<FatalErrorIndicator />

View File

@ -12,6 +12,7 @@ import {
import { capitalize } from "lodash";
import { useCallback, useEffect, useMemo } from "react";
import { useAppDispatch, useAppSelector } from "../../redux/hooks";
import { selectUseSystemMessageTools } from "../../redux/selectors/selectUseSystemMessageTools";
import { selectSelectedChatModel } from "../../redux/slices/configSlice";
import { setMode } from "../../redux/slices/sessionSlice";
import { getFontSize, getMetaKeyLabel } from "../../util";
@ -24,9 +25,8 @@ export function ModeSelect() {
const dispatch = useAppDispatch();
const mode = useAppSelector((store) => store.session.mode);
const selectedModel = useAppSelector(selectSelectedChatModel);
const useSystemTools = useAppSelector(
(state) => state.config.config.experimental?.onlyUseSystemMessageTools,
);
const useSystemTools = useAppSelector(selectUseSystemMessageTools);
const isAgentSupported = useMemo(() => {
if (!selectedModel) {
return undefined;

View File

@ -1,14 +1,10 @@
import ToggleSwitch from "../../components/gui/Switch";
interface ContinueFeaturesMenuProps {
optInNextEditFeature: boolean;
handleOptInNextEditToggle: (value: boolean) => void;
enableStaticContextualization: boolean;
handleEnableStaticContextualizationToggle: (value: boolean) => void;
}
export function ContinueFeaturesMenu({
optInNextEditFeature,
handleOptInNextEditToggle,
enableStaticContextualization,
handleEnableStaticContextualizationToggle,
}: ContinueFeaturesMenuProps) {
@ -18,12 +14,6 @@ export function ContinueFeaturesMenu({
🚧 INTERNAL SETTINGS 🚧
</div>
<div className="flex w-full flex-col gap-y-4">
<ToggleSwitch
isToggled={optInNextEditFeature}
onToggle={() => handleOptInNextEditToggle(!optInNextEditFeature)}
text="Enable Next Edit Over Autocomplete"
/>
<ToggleSwitch
isToggled={enableStaticContextualization}
onToggle={() =>

View File

@ -67,10 +67,6 @@ export function UserSettingsForm() {
});
};
const handleOptInNextEditToggle = (value: boolean) => {
handleUpdate({ optInNextEditFeature: value });
};
const handleEnableStaticContextualizationToggle = (value: boolean) => {
handleUpdate({ enableStaticContextualization: value });
};
@ -94,8 +90,6 @@ export function UserSettingsForm() {
config.experimental?.enableExperimentalTools ?? false;
const onlyUseSystemMessageTools =
config.experimental?.onlyUseSystemMessageTools ?? false;
const optInNextEditFeature =
config.experimental?.optInNextEditFeature ?? false;
const codebaseToolCallingOnly =
config.experimental?.codebaseToolCallingOnly ?? false;
const enableStaticContextualization =
@ -449,8 +443,6 @@ export function UserSettingsForm() {
{hasContinueEmail && (
<ContinueFeaturesMenu
optInNextEditFeature={optInNextEditFeature}
handleOptInNextEditToggle={handleOptInNextEditToggle}
enableStaticContextualization={
enableStaticContextualization
}

View File

@ -0,0 +1,112 @@
import { ModelDescription } from "core";
import { describe, expect, test, vi } from "vitest";
import { RootState } from "../store";
// Mock the shouldAutoEnableSystemMessageTools function
vi.mock("core/config/shouldAutoEnableSystemMessageTools", () => ({
shouldAutoEnableSystemMessageTools: vi.fn((model: ModelDescription) => {
if (model.provider === "openrouter") {
return !model.model.toLowerCase().includes("claude");
}
return undefined; // No auto-preference for other providers
}),
}));
// Mock the selectSelectedChatModel selector - we'll control its return value in tests
vi.mock("../slices/configSlice", () => ({
selectSelectedChatModel: vi.fn(),
}));
// Import after mocking
import { selectSelectedChatModel } from "../slices/configSlice";
import { selectUseSystemMessageTools } from "./selectUseSystemMessageTools";
describe("selectUseSystemMessageTools", () => {
const mockSelectSelectedChatModel = selectSelectedChatModel as any;
const createMockState = (manualSetting?: boolean): RootState => {
return {
config: {
config: {
experimental: {
onlyUseSystemMessageTools: manualSetting,
},
},
},
} as any;
};
const createModel = (provider: string, model: string): ModelDescription => ({
title: "Test Model",
provider,
underlyingProviderName: provider,
model,
});
test("auto-detection takes priority: OpenRouter non-Claude returns true even with manual false", () => {
mockSelectSelectedChatModel.mockReturnValue(
createModel("openrouter", "gpt-4"),
);
const state = createMockState(false);
expect(selectUseSystemMessageTools(state)).toBe(true);
});
test("auto-detection takes priority: OpenRouter Claude returns false even with manual true", () => {
mockSelectSelectedChatModel.mockReturnValue(
createModel("openrouter", "claude-3.5-sonnet"),
);
const state = createMockState(true);
expect(selectUseSystemMessageTools(state)).toBe(false);
});
test("falls back to manual setting when no auto-preference: manual true", () => {
mockSelectSelectedChatModel.mockReturnValue(createModel("openai", "gpt-4"));
const state = createMockState(true);
expect(selectUseSystemMessageTools(state)).toBe(true);
});
test("falls back to manual setting when no auto-preference: manual false", () => {
mockSelectSelectedChatModel.mockReturnValue(
createModel("anthropic", "claude-3.5-sonnet"),
);
const state = createMockState(false);
expect(selectUseSystemMessageTools(state)).toBe(false);
});
test("defaults to false when no auto-preference and no manual setting", () => {
mockSelectSelectedChatModel.mockReturnValue(
createModel("groq", "llama-3.3-70b"),
);
const state = createMockState(undefined);
expect(selectUseSystemMessageTools(state)).toBe(false);
});
test("handles missing selected model: uses manual setting", () => {
mockSelectSelectedChatModel.mockReturnValue(undefined);
const state = createMockState(true);
expect(selectUseSystemMessageTools(state)).toBe(true);
});
test("handles missing selected model: defaults to false when no manual setting", () => {
mockSelectSelectedChatModel.mockReturnValue(undefined);
const state = createMockState(undefined);
expect(selectUseSystemMessageTools(state)).toBe(false);
});
test("auto-enable works for various OpenRouter models", () => {
mockSelectSelectedChatModel.mockReturnValue(
createModel("openrouter", "gpt-4o"),
);
expect(selectUseSystemMessageTools(createMockState(false))).toBe(true);
mockSelectSelectedChatModel.mockReturnValue(
createModel("openrouter", "llama-3.3-70b"),
);
expect(selectUseSystemMessageTools(createMockState(false))).toBe(true);
mockSelectSelectedChatModel.mockReturnValue(
createModel("openrouter", "claude-haiku"),
);
expect(selectUseSystemMessageTools(createMockState(true))).toBe(false);
});
});

View File

@ -0,0 +1,37 @@
import { shouldAutoEnableSystemMessageTools } from "core/config/shouldAutoEnableSystemMessageTools";
import { selectSelectedChatModel } from "../slices/configSlice";
import { RootState } from "../store";
/**
* Selector that determines if system message tools should be used.
* This prioritizes auto-detection over manual configuration where we have a strong preference.
*
* Priority order:
* 1. Auto-detection (if it returns true/false)
* 2. Manual user configuration (if auto-detection returns undefined)
* 3. Default to false
*
* @param state The Redux root state
* @returns true if system message tools should be used, false otherwise
*/
export function selectUseSystemMessageTools(state: RootState): boolean {
const selectedModel = selectSelectedChatModel(state);
const manualSetting =
state.config.config.experimental?.onlyUseSystemMessageTools;
// If no model is selected, fall back to manual setting or default
if (!selectedModel) {
return manualSetting ?? false;
}
// Check auto-detection first
const autoSetting = shouldAutoEnableSystemMessageTools(selectedModel);
// If auto-detection has a preference, use it (takes priority)
if (autoSetting !== undefined) {
return autoSetting;
}
// If no auto-preference, use manual setting or default to false
return manualSetting ?? false;
}
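
On the consumer side, the selector drops in wherever the raw experimental flag was read before. A minimal sketch, mirroring the `ModeSelect` change elsewhere in this commit (import paths assumed):

```ts
import { useAppSelector } from "../../redux/hooks";
import { selectUseSystemMessageTools } from "../../redux/selectors/selectUseSystemMessageTools";

// Inside a React component: true when either auto-detection or the manual
// experimental setting enables system message tools.
const useSystemTools = useAppSelector(selectUseSystemMessageTools);
```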

View File

@ -4,6 +4,7 @@ import { getRuleId } from "core/llm/rules/getSystemMessageWithRules";
import { ToCoreProtocol } from "core/protocol";
import { BuiltInToolNames } from "core/tools/builtIn";
import { selectActiveTools } from "../selectors/selectActiveTools";
import { selectUseSystemMessageTools } from "../selectors/selectUseSystemMessageTools";
import { selectSelectedChatModel } from "../slices/configSlice";
import {
abortStream,
@ -141,8 +142,9 @@ export const streamNormalInput = createAsyncThunk<
const allActiveTools = selectActiveTools(state);
const activeTools = filterToolsForModel(allActiveTools, selectedChatModel);
const supportsNativeTools = modelSupportsNativeTools(selectedChatModel);
const useSystemTools =
!!state.config.config.experimental?.onlyUseSystemMessageTools;
// Use the centralized selector to determine if system message tools should be used
const useSystemTools = selectUseSystemMessageTools(state);
const useNativeTools = !useSystemTools && supportsNativeTools;
// Construct completion options