Merge branch 'main' into nate/prettier-ci

This commit is contained in:
Nate 2025-06-02 13:03:36 -07:00
commit 02e4fa6e45
185 changed files with 6613 additions and 2490 deletions

View File

@ -0,0 +1,13 @@
name: bigger-picture-description-rules
version: 0.0.1
schema: v1
rules:
  - name: bigger-picture-description-rules
    # Folded scalar (>-): blank lines between the numbered items preserve
    # the line breaks; without them the items would fold onto one line.
    rule: >-
      When a user asks how a certain part of the code works:

      1. Describe what the code does in isolation.

      2. If the code interacts with other parts of the codebase, describe how the code is imported and used in other parts of the codebase.

      3. When describing each use-case, include the parent function for clarity.

View File

@ -0,0 +1,27 @@
name: GUI
version: 0.0.1
schema: v1
rules:
  - name: Extension Color Themes
    # Literal block scalar (|): line breaks and the nested bullet structure
    # are part of the rule text delivered to the model.
    rule: |
      When adding colors to components, use tailwind color classes.
      Do NOT use explicit colors like text-gray-400. Instead, use theme colors.
      The common available theme colors are:
      - For normal text: foreground, description, description-muted
      - For other text, icons, etc: success, warning, error, accent, link
      - For general components background, border, border-focus
      - For specific components:
        - Button: primary, primary-foreground, primary-hover, secondary, secondary-foreground, secondary-hover
        - Input: input, input-foreground, input-border, input-placeholder
        - Badge: badge, badge-foreground
        - List/Dropdown items: list-hover, list-active, list-active-foreground
        - Code Editor: editor, editor-foreground
      Any of these colors can be used in tailwind e.g. bg-primary, text-success, border-error, hover:bg-list-hover, etc.
    globs: "gui/**/*.tsx"
    # Excluding the following less-used colors:
    # Command (only used by tip-tap): command, command-foreground, command-border, command-border-focus
    # Find widget colors: find-match, find-match-selected
    # table-oddRow

View File

@ -0,0 +1,9 @@
---
globs: "**/*"
description: General questions about code completion should be answered specific to Continue
---
# Continue Specificity
- In chat mode, if the user asks generally about code completion or developer tools, answer specifically regarding Continue and not other similar software.
- Keep all suggestions and comments concentrated on Continue, unless the user asks otherwise. If the user does this, answer with no particular specificity to Continue.

View File

@ -0,0 +1,16 @@
---
globs: docs/**/*.{md,mdx}
description: Standards for writing and maintaining Continue Docs
---
# Continue Docs Standards
- Follow Docusaurus documentation standards
- Include YAML frontmatter with title, description, and keywords
- Use consistent heading hierarchy starting with h2 (##)
- Include relevant Admonition components for tips, warnings, and info
- Use descriptive alt text for images
- Include cross-references to related documentation
- Reference other docs with relative paths
- Keep paragraphs concise and scannable
- Use code blocks with appropriate language tags

View File

@ -0,0 +1,11 @@
---
globs: core/llm/llms/**/*.{ts,test.ts}
description: Tailor recommendations for LLM code based on which specific LLM is being used.
---
# LLM Model Specificity
- Refer to the file name and names of big classes to determine which LLM is being used in a file.
- Ground all observations and recommendations with knowledge of that LLM.
- Consider items such as context length, architecture, speed, and such.
- Pay attention to the parent classes in these files.

75
.github/workflows/metrics.yaml vendored Normal file
View File

@ -0,0 +1,75 @@
name: Monthly issue metrics

on:
  workflow_dispatch:
  schedule:
    # Runs every Monday at 9:00 AM PST (17:00 UTC)
    - cron: "0 17 * * 1"
  push:
    branches:
      - nate/metrics-action

permissions:
  contents: read

jobs:
  build:
    name: issue metrics
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: read
    steps:
      - name: Get dates for last month
        shell: bash
        run: |
          # Calculate the first day of the previous month
          first_day=$(date -d "last month" +%Y-%m-01)
          # Calculate the last day of the previous month
          last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d)
          # Set an environment variable with the date range
          echo "$first_day..$last_day"
          echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV"

      - name: Run issue-metrics tool
        uses: github/issue-metrics@v3
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SEARCH_QUERY: 'repo:continuedev/continue created:${{ env.last_month }} -reason:"not planned"'

      - name: Read metrics file content
        id: read-metrics
        run: |
          # JSON-encode the whole report as a single line with jq. The older
          # %-escaping trick ("%0A" etc.) only works with the deprecated
          # ::set-output command; values written to $GITHUB_OUTPUT are taken
          # verbatim, so the escapes would appear literally in Slack.
          # jq -Rs emits one JSON string (with surrounding quotes) that can be
          # substituted directly into the JSON payload below.
          content=$(jq -Rs '.' ./issue_metrics.md)
          echo "metrics_content=$content" >> "$GITHUB_OUTPUT"

      - name: Post a message in a channel
        uses: slackapi/slack-github-action@v2.1.0
        with:
          webhook: ${{ secrets.ISSUE_PR_METRICS_SLACK_WEBHOOK_URL }}
          webhook-type: incoming-webhook
          # Payload is JSON; metrics_content is already a quoted JSON string,
          # so it is substituted without additional quotes.
          payload: |
            {
              "text": "Issue / PR Metrics Report",
              "blocks": [
                {
                  "type": "header",
                  "text": {
                    "type": "plain_text",
                    "text": "Monthly Issue Metrics Report",
                    "emoji": true
                  }
                },
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ${{ steps.read-metrics.outputs.metrics_content }}
                  }
                }
              ]
            }

      - name: Upload metrics report as artifact
        uses: actions/upload-artifact@v4
        with:
          name: issue-metrics-report
          path: ./issue_metrics.md

View File

@ -426,6 +426,7 @@ jobs:
run: |
cd core
npm test
npm run vitest
env:
IGNORE_API_KEY_TESTS: ${{ github.event.pull_request.head.repo.fork == true || github.actor == 'dependabot[bot]' }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

1
.gitignore vendored
View File

@ -144,6 +144,7 @@ Icon?
.continuerc.json
.aider*
*.notes.md
notes.md
manual-testing-sandbox/.idea/**

View File

@ -45,6 +45,7 @@
"extensions/.continue-debug": true
// "sync/**": true
},
"editor.formatOnSave": true,
"eslint.workingDirectories": ["./core"],
"typescript.tsdk": "node_modules/typescript/lib",
"conventionalCommits.showNewVersionNotes": false,

View File

@ -22,6 +22,7 @@
- [Our Git Workflow](#our-git-workflow)
- [Development workflow](#development-workflow)
- [Formatting](#formatting)
- [Theme Colors](#theme-colors)
- [Testing](#testing)
- [Review Process](#review-process)
- [Getting help](#getting-help)
@ -192,6 +193,22 @@ To keep the Continue codebase clean and maintainable, we expect the following fr
Continue uses [Prettier](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode) to format
JavaScript/TypeScript. Please install the Prettier extension in VS Code and enable "Format on Save" in your settings.
### Theme Colors
Continue has a set of named theme colors that we map to extension colors and tailwind classes, which can be found in [gui/src/styles/theme.ts](gui/src/styles/theme.ts)
Guidelines for using theme colors:
- Use Tailwind colors whenever possible. If developing in VS Code, download the [Tailwind CSS Intellisense extension](https://marketplace.visualstudio.com/items?itemName=bradlc.vscode-tailwindcss) for great suggestions
- Avoid using any explicit classes and CSS variables outside the theme (e.g. `text-yellow-400`)
Guidelines for adding/updating theme colors:
- Choose sensible VS Code variables to add/update in [gui/src/styles/theme.ts](gui/src/styles/theme.ts) (see [here](https://code.visualstudio.com/api/references/theme-color) and [here](https://www.notion.so/1fa1d55165f78097b551e3bc296fcf76?pvs=25) for inspiration)
- Choose sensible Jetbrains named colors to add/update in `GetTheme.kt` (flagship LLMs can give you good suggestions to try)
- Update `tailwind.config.js` if needed
- Use the Theme Test Page to check colors. This can be accessed by going to `Settings` -> `Help` -> `Theme Test Page` in dev/debug mode.
### Testing
We have a mix of unit, functional, and e2e test suites, with a primary focus on functional testing. These tests run on

View File

@ -52,10 +52,10 @@
"@aws-sdk/credential-providers": "^3.778.0",
"@continuedev/config-types": "^1.0.13",
"@continuedev/config-yaml": "file:../packages/config-yaml",
"@continuedev/fetch": "^1.0.6",
"@continuedev/fetch": "^1.0.10",
"@continuedev/llm-info": "^1.0.8",
"@continuedev/openai-adapters": "^1.0.19",
"@modelcontextprotocol/sdk": "^1.5.0",
"@continuedev/openai-adapters": "^1.0.25",
"@modelcontextprotocol/sdk": "^1.12.0",
"@mozilla/readability": "^0.5.0",
"@octokit/rest": "^20.1.1",
"@typescript-eslint/eslint-plugin": "^7.8.0",

View File

@ -226,6 +226,7 @@ describe("Test Suite", () => {
title: "Test Model",
provider: "openai",
model: "gpt-3.5-turbo",
underlyingProviderName: "openai",
};
await messenger.request("config/addModel", {
model,
@ -252,6 +253,7 @@ describe("Test Suite", () => {
title: "Test Model",
provider: "mock",
model: "gpt-3.5-turbo",
underlyingProviderName: "mock",
};
await messenger.request("config/addModel", {
model,

View File

@ -1,5 +1,4 @@
import { ConfigHandler } from "../config/ConfigHandler.js";
import { TRIAL_FIM_MODEL } from "../config/onboarding.js";
import { IDE, ILLM } from "../index.js";
import OpenAI from "../llm/llms/OpenAI.js";
import { DEFAULT_AUTOCOMPLETE_OPTS } from "../util/parameters.js";
@ -75,11 +74,6 @@ export class CompletionProvider {
if (llm instanceof OpenAI) {
llm.useLegacyCompletionsEndpoint = true;
} else if (
llm.providerName === "free-trial" &&
llm.model !== TRIAL_FIM_MODEL
) {
llm.model = TRIAL_FIM_MODEL;
}
return llm;
@ -244,7 +238,7 @@ export class CompletionProvider {
prefix,
suffix,
prompt,
modelProvider: llm.providerName,
modelProvider: llm.underlyingProviderName,
modelName: llm.model,
completionOptions,
cacheHit,

View File

@ -3,6 +3,7 @@ import { findUriInDirs } from "../../util/uri";
import { ContextRetrievalService } from "../context/ContextRetrievalService";
import { GetLspDefinitionsFunction } from "../types";
import { HelperVars } from "../util/HelperVars";
import { getDiffsFromCache } from "./gitDiffCache";
import {
AutocompleteClipboardSnippet,
@ -23,35 +24,14 @@ export interface SnippetPayload {
clipboardSnippets: AutocompleteClipboardSnippet[];
}
function racePromise<T>(promise: Promise<T[]>): Promise<T[]> {
function racePromise<T>(promise: Promise<T[]>, timeout = 100): Promise<T[]> {
const timeoutPromise = new Promise<T[]>((resolve) => {
setTimeout(() => resolve([]), 100);
setTimeout(() => resolve([]), timeout);
});
return Promise.race([promise, timeoutPromise]);
}
class DiffSnippetsCache {
private cache: Map<number, any> = new Map();
private lastTimestamp: number = 0;
public set<T>(timestamp: number, value: T): T {
// Clear old cache entry if exists
if (this.lastTimestamp !== timestamp) {
this.cache.clear();
}
this.lastTimestamp = timestamp;
this.cache.set(timestamp, value);
return value;
}
public get(timestamp: number): any | undefined {
return this.cache.get(timestamp);
}
}
const diffSnippetsCache = new DiffSnippetsCache();
// Some IDEs might have special ways of finding snippets (e.g. JetBrains and VS Code have different "LSP-equivalent" systems,
// or they might separately track recently edited ranges)
async function getIdeSnippets(
@ -113,35 +93,14 @@ const getClipboardSnippets = async (
const getDiffSnippets = async (
ide: IDE,
): Promise<AutocompleteDiffSnippet[]> => {
const currentTimestamp = ide.getLastFileSaveTimestamp
? ide.getLastFileSaveTimestamp()
: Math.floor(Date.now() / 10000) * 10000; // Defaults to update once in every 10 seconds
const diffs = await getDiffsFromCache(ide);
// Check cache first
const cached = diffSnippetsCache.get(
currentTimestamp,
) as AutocompleteDiffSnippet[];
if (cached) {
return cached;
}
let diff: string[] = [];
try {
diff = await ide.getDiff(true);
} catch (e) {
console.error("Error getting diff for autocomplete", e);
}
return diffSnippetsCache.set(
currentTimestamp,
diff.map((item) => {
return {
content: item,
type: AutocompleteSnippetType.Diff,
};
}),
);
return diffs.map((item) => {
return {
content: item,
type: AutocompleteSnippetType.Diff,
};
});
};
export const getAllSnippets = async ({
@ -172,7 +131,7 @@ export const getAllSnippets = async ({
IDE_SNIPPETS_ENABLED
? racePromise(getIdeSnippets(helper, ide, getDefinitionsFromLsp))
: [],
racePromise(getDiffSnippets(ide)),
[], // racePromise(getDiffSnippets(ide)) // temporarily disabled, see https://github.com/continuedev/continue/pull/5882,
racePromise(getClipboardSnippets(ide)),
]);

View File

@ -0,0 +1,84 @@
import { GitDiffCache } from "./gitDiffCache";

beforeEach(() => {
  // GitDiffCache is a singleton; reset its private `instance` field so every
  // test starts from a fresh cache.
  (GitDiffCache as any).instance = null;
});

test("GitDiffCache returns cached results within cache time", async () => {
  const mockDiff = ["file1.ts", "file2.ts"];
  const getDiffFn = jest.fn().mockResolvedValue(mockDiff);
  const cache = GitDiffCache.getInstance(getDiffFn, 1); // 1 second cache

  const result1 = await cache.get();
  const result2 = await cache.get();

  expect(result1).toEqual(mockDiff);
  expect(result2).toEqual(mockDiff);
  // The second get() must be served from the cache, not a new fetch.
  expect(getDiffFn).toHaveBeenCalledTimes(1);
});

test("GitDiffCache refreshes cache after expiration", async () => {
  const mockDiff = ["file1.ts"];
  const getDiffFn = jest.fn().mockResolvedValue(mockDiff);
  const cache = GitDiffCache.getInstance(getDiffFn, 0.1); // 100ms cache

  const result1 = await cache.get();
  await new Promise((resolve) => setTimeout(resolve, 200)); // Wait for cache to expire
  const result2 = await cache.get();

  // Both calls return the diff, but the second triggers a fresh fetch.
  expect(result1).toEqual(mockDiff);
  expect(result2).toEqual(mockDiff);
  expect(getDiffFn).toHaveBeenCalledTimes(2);
});

test("GitDiffCache returns empty array on error", async () => {
  const getDiffFn = jest.fn().mockRejectedValue(new Error("Git error"));
  const cache = GitDiffCache.getInstance(getDiffFn);

  const result = await cache.get();

  expect(result).toEqual([]);
});

test("GitDiffCache reuses pending request", async () => {
  const mockDiff = ["file1.ts"];
  let resolvePromise: (value: string[]) => void;
  const getDiffFn = jest.fn().mockImplementation(() => {
    return new Promise((resolve) => {
      resolvePromise = resolve;
    });
  });
  const cache = GitDiffCache.getInstance(getDiffFn);

  // Issue two overlapping requests before the first one resolves.
  const promise1 = cache.get();
  const promise2 = cache.get();
  resolvePromise!(mockDiff);
  const [result1, result2] = await Promise.all([promise1, promise2]);

  expect(result1).toEqual(mockDiff);
  expect(result2).toEqual(mockDiff);
  // Only one underlying fetch should have been started.
  expect(getDiffFn).toHaveBeenCalledTimes(1);
});

test("GitDiffCache invalidate clears cache", async () => {
  const mockDiff = ["file1.ts"];
  const getDiffFn = jest.fn().mockResolvedValue(mockDiff);
  const cache = GitDiffCache.getInstance(getDiffFn);

  await cache.get();
  cache.invalidate();
  await cache.get();

  // After invalidate(), the next get() must fetch again.
  expect(getDiffFn).toHaveBeenCalledTimes(2);
});

test("GitDiffCache maintains singleton instance", () => {
  const getDiffFn1 = jest.fn();
  const getDiffFn2 = jest.fn();

  const instance1 = GitDiffCache.getInstance(getDiffFn1);
  const instance2 = GitDiffCache.getInstance(getDiffFn2);

  // The second getInstance() call ignores its arguments and returns the
  // instance created first.
  expect(instance1).toBe(instance2);
});

View File

@ -0,0 +1,73 @@
import { IDE } from "../..";
type GetDiffFn = () => Promise<string[]>;
/**
 * Process-wide cache for git diff results, used to avoid re-running the diff
 * on every call (e.g. every autocomplete request).
 *
 * Singleton: the first getInstance() call fixes both the diff function and
 * the cache duration; later calls ignore their arguments and return the same
 * instance. Concurrent get() calls while a fetch is in flight share the same
 * pending promise.
 */
export class GitDiffCache {
private static instance: GitDiffCache | null = null;
// Last successfully fetched diff; undefined until the first success.
private cachedDiff: string[] | undefined = undefined;
// Date.now() of the last successful fetch; 0 means "never fetched".
private lastFetchTime: number = 0;
// In-flight fetch shared by concurrent get() callers; null when idle.
private pendingRequest: Promise<string[]> | null = null;
private getDiffFn: GetDiffFn;
private cacheTimeMs: number;
// Private: instances are only created through getInstance().
private constructor(getDiffFn: GetDiffFn, cacheTimeSeconds: number = 60) {
this.getDiffFn = getDiffFn;
this.cacheTimeMs = cacheTimeSeconds * 1000;
}
/**
 * Returns the shared instance, creating it on first use. Note that on
 * subsequent calls both arguments are ignored.
 */
public static getInstance(
getDiffFn: GetDiffFn,
cacheTimeSeconds?: number,
): GitDiffCache {
if (!GitDiffCache.instance) {
GitDiffCache.instance = new GitDiffCache(getDiffFn, cacheTimeSeconds);
}
return GitDiffCache.instance;
}
/**
 * Performs the actual fetch, records the result + timestamp on success, and
 * returns [] on failure (errors are logged, not rethrown). Failed fetches
 * are not cached, so the next get() after an error retries.
 */
private async getDiffPromise(): Promise<string[]> {
try {
const diff = await this.getDiffFn();
this.cachedDiff = diff;
this.lastFetchTime = Date.now();
return this.cachedDiff;
} catch (e) {
console.error("Error fetching git diff:", e);
return [];
} finally {
// Always clear the in-flight marker so later calls can fetch again.
this.pendingRequest = null;
}
}
/**
 * Returns the cached diff if it is still fresh; otherwise joins the
 * in-flight fetch or starts a new one.
 */
public async get(): Promise<string[]> {
if (
this.cachedDiff !== undefined &&
Date.now() - this.lastFetchTime < this.cacheTimeMs
) {
return this.cachedDiff;
}
// If there's already a request in progress, return that instead of starting a new one
if (this.pendingRequest) {
return this.pendingRequest;
}
this.pendingRequest = this.getDiffPromise();
return this.pendingRequest;
}
/** Drops the cached diff and any in-flight request; next get() refetches. */
public invalidate(): void {
this.cachedDiff = undefined;
this.pendingRequest = null;
}
}
/**
 * Factory that adapts an IDE into the GetDiffFn shape, keeping the cache
 * itself decoupled from the IDE interface (and easier to test).
 */
export function getDiffFn(ide: IDE): GetDiffFn {
  return async () => {
    return ide.getDiff(true);
  };
}
export async function getDiffsFromCache(ide: IDE): Promise<string[]> {
const diffCache = GitDiffCache.getInstance(getDiffFn(ide));
return await diffCache.get();
}

View File

@ -43,7 +43,6 @@ export class HelperVars {
if (this._fileContents !== undefined) {
return;
}
this.workspaceUris = await this.ide.getWorkspaceDirs();
this._fileContents =

View File

@ -46,12 +46,10 @@ import { useHub } from "../control-plane/env";
import { BaseLLM } from "../llm";
import { LLMClasses, llmFromDescription } from "../llm/llms";
import CustomLLMClass from "../llm/llms/CustomLLM";
import FreeTrial from "../llm/llms/FreeTrial";
import { LLMReranker } from "../llm/llms/llm";
import TransformersJsEmbeddingsProvider from "../llm/llms/TransformersJsEmbeddingsProvider";
import { slashCommandFromPromptFileV1 } from "../promptFiles/v1/slashCommandFromPromptFile";
import { getAllPromptFiles } from "../promptFiles/v2/getPromptFiles";
import { allTools } from "../tools";
import { copyOf } from "../util";
import { GlobalContext } from "../util/GlobalContext";
import mergeJson from "../util/merge";
@ -67,6 +65,7 @@ import {
} from "../util/paths";
import { localPathToUri } from "../util/pathToUri";
import { baseToolDefinitions } from "../tools";
import { modifyAnyConfigWithSharedConfig } from "./sharedConfig";
import {
getModelByRole,
@ -244,7 +243,6 @@ async function intermediateToFinalConfig({
llmLogger,
workOsAccessToken,
loadPromptFiles = true,
allowFreeTrial = true,
}: {
config: Config;
ide: IDE;
@ -254,7 +252,6 @@ async function intermediateToFinalConfig({
llmLogger: ILLMLogger;
workOsAccessToken: string | undefined;
loadPromptFiles?: boolean;
allowFreeTrial?: boolean;
}): Promise<{ config: ContinueConfig; errors: ConfigValidationError[] }> {
const errors: ConfigValidationError[] = [];
@ -343,56 +340,43 @@ async function intermediateToFinalConfig({
"summarize",
]); // Default to chat role if not specified
if (allowFreeTrial) {
// Obtain auth token (iff free trial being used)
const freeTrialModels = models.filter(
(model) => model.providerName === "free-trial",
);
if (freeTrialModels.length > 0) {
const ghAuthToken = await ide.getGitHubAuthToken({});
for (const model of freeTrialModels) {
(model as FreeTrial).setupGhAuthToken(ghAuthToken);
}
}
} else {
// Remove free trial models
models = models.filter((model) => model.providerName !== "free-trial");
// Free trial provider will be completely ignored
let warnAboutFreeTrial = false;
models = models.filter((model) => model.providerName !== "free-trial");
if (models.filter((m) => m.providerName === "free-trial").length) {
warnAboutFreeTrial = true;
}
// Tab autocomplete model
let tabAutocompleteModels: BaseLLM[] = [];
const tabAutocompleteModels: BaseLLM[] = [];
if (config.tabAutocompleteModel) {
tabAutocompleteModels = (
await Promise.all(
(Array.isArray(config.tabAutocompleteModel)
? config.tabAutocompleteModel
: [config.tabAutocompleteModel]
).map(async (desc) => {
if ("title" in desc) {
const llm = await llmFromDescription(
desc,
ide.readFile.bind(ide),
uniqueId,
ideSettings,
llmLogger,
config.completionOptions,
);
const autocompleteConfigs = Array.isArray(config.tabAutocompleteModel)
? config.tabAutocompleteModel
: [config.tabAutocompleteModel];
if (llm?.providerName === "free-trial") {
if (!allowFreeTrial) {
// This shouldn't happen
throw new Error("Free trial cannot be used with control plane");
}
const ghAuthToken = await ide.getGitHubAuthToken({});
(llm as FreeTrial).setupGhAuthToken(ghAuthToken);
await Promise.all(
autocompleteConfigs.map(async (desc) => {
if ("title" in desc) {
const llm = await llmFromDescription(
desc,
ide.readFile.bind(ide),
uniqueId,
ideSettings,
llmLogger,
config.completionOptions,
);
if (llm) {
if (llm.providerName === "free-trial") {
warnAboutFreeTrial = true;
} else {
tabAutocompleteModels.push(llm);
}
return llm;
} else {
return new CustomLLMClass(desc);
}
}),
)
).filter((x) => x !== undefined) as BaseLLM[];
} else {
tabAutocompleteModels.push(new CustomLLMClass(desc));
}
}),
);
}
applyRequestOptionsToModels(tabAutocompleteModels, config);
@ -458,7 +442,10 @@ async function intermediateToFinalConfig({
return embedConfig;
}
const { provider, ...options } = embedConfig;
if (provider === "transformers.js") {
if (provider === "transformers.js" || provider === "free-trial") {
if (provider === "free-trial") {
warnAboutFreeTrial = true;
}
return new TransformersJsEmbeddingsProvider();
} else {
const cls = LLMClasses.find((c) => c.providerName === provider);
@ -495,7 +482,10 @@ async function intermediateToFinalConfig({
return rerankingConfig;
}
const { name, params } = config.reranker as RerankerDescription;
if (name === "free-trial") {
warnAboutFreeTrial = true;
return null;
}
if (name === "llm") {
const llm = models.find((model) => model.title === params?.modelTitle);
if (!llm) {
@ -526,10 +516,18 @@ async function intermediateToFinalConfig({
}
const newReranker = getRerankingILLM(config.reranker);
if (warnAboutFreeTrial) {
errors.push({
fatal: false,
message:
"Model provider 'free-trial' is no longer supported, will be ignored",
});
}
const continueConfig: ContinueConfig = {
...config,
contextProviders,
tools: [...allTools],
tools: [...baseToolDefinitions],
mcpServerStatuses: [],
slashCommands: config.slashCommands ?? [],
modelsByRole: {
@ -627,6 +625,7 @@ async function intermediateToFinalConfig({
function llmToSerializedModelDescription(llm: ILLM): ModelDescription {
return {
provider: llm.providerName,
underlyingProviderName: llm.underlyingProviderName,
model: llm.model,
title: llm.title ?? llm.model,
apiKey: llm.apiKey,

View File

@ -171,4 +171,22 @@ This is the content of the rule.`;
"This is a rule description from frontmatter",
);
});
it("should include `alwaysApply` from frontmatter", () => {
const content = `---
globs: "**/test/**/*.kt"
name: Test Rule
alwaysApply: false
---
# Test Rule
This is a rule with alwaysApply explicitly set to false.`;
const result = convertMarkdownRuleToContinueRule(
"/path/to/rule.md",
content,
);
expect(result.alwaysApply).toBe(false);
});
});

View File

@ -2,11 +2,18 @@ import { basename } from "path";
import * as YAML from "yaml";
import { RuleWithSource } from "../..";
export interface RuleFrontmatter {
globs?: RuleWithSource["globs"];
name?: RuleWithSource["name"];
description?: RuleWithSource["description"];
alwaysApply?: RuleWithSource["alwaysApply"];
}
/**
* Parses markdown content with YAML frontmatter
*/
export function parseMarkdownRule(content: string): {
frontmatter: Record<string, any>;
frontmatter: RuleFrontmatter;
markdown: string;
} {
// Normalize line endings to \n
@ -63,6 +70,7 @@ export function convertMarkdownRuleToContinueRule(
rule: markdown,
globs: frontmatter.globs,
description: frontmatter.description,
alwaysApply: frontmatter.alwaysApply,
source: "rules-block",
ruleFile: path,
};

View File

@ -1,6 +1,5 @@
import { ConfigYaml } from "@continuedev/config-yaml";
export const TRIAL_FIM_MODEL = "codestral-latest";
export const LOCAL_ONBOARDING_PROVIDER_TITLE = "Ollama";
export const LOCAL_ONBOARDING_FIM_MODEL = "qwen2.5-coder:1.5b-base";
export const LOCAL_ONBOARDING_FIM_TITLE = "Qwen2.5-Coder 1.5B";

View File

@ -25,6 +25,7 @@ import { ControlPlaneClient } from "../../control-plane/client.js";
import { getControlPlaneEnv } from "../../control-plane/env.js";
import { TeamAnalytics } from "../../control-plane/TeamAnalytics.js";
import ContinueProxy from "../../llm/llms/stubs/ContinueProxy";
import { getConfigDependentToolDefinitions } from "../../tools";
import { encodeMCPToolUri } from "../../tools/callTool";
import { getConfigJsonPath, getConfigYamlPath } from "../../util/paths";
import { localPathOrUriToPath } from "../../util/pathToUri";
@ -176,12 +177,21 @@ export default async function doLoadConfig(options: {
);
newConfig.slashCommands.push(...serverSlashCommands);
const submenuItems = server.resources.map((resource) => ({
title: resource.name,
description: resource.description ?? resource.name,
id: resource.uri,
icon: server.faviconUrl,
}));
const submenuItems = server.resources
.map((resource) => ({
title: resource.name,
description: resource.description ?? resource.name,
id: resource.uri,
icon: server.faviconUrl,
}))
.concat(
server.resourceTemplates.map((template) => ({
title: template.name,
description: template.description ?? template.name,
id: template.uriTemplate,
icon: server.faviconUrl,
})),
);
if (submenuItems.length > 0) {
const serverContextProvider = new MCPContextProvider({
submenuItems,
@ -193,6 +203,12 @@ export default async function doLoadConfig(options: {
}
}
newConfig.tools.push(
...getConfigDependentToolDefinitions({
rules: newConfig.rules,
}),
);
// Detect duplicate tool names
const counts: Record<string, number> = {};
newConfig.tools.forEach((tool) => {
@ -202,6 +218,7 @@ export default async function doLoadConfig(options: {
counts[tool.function.name] = 1;
}
});
Object.entries(counts).forEach(([toolName, count]) => {
if (count > 1) {
errors!.push({
@ -211,6 +228,26 @@ export default async function doLoadConfig(options: {
}
});
const ruleCounts: Record<string, number> = {};
newConfig.rules.forEach((rule) => {
if (rule.name) {
if (ruleCounts[rule.name]) {
ruleCounts[rule.name] = ruleCounts[rule.name] + 1;
} else {
ruleCounts[rule.name] = 1;
}
}
});
Object.entries(ruleCounts).forEach(([ruleName, count]) => {
if (count > 1) {
errors!.push({
fatal: false,
message: `Duplicate (${count}) rules named "${ruleName}" detected. This may cause unexpected behavior`,
});
}
});
newConfig.allowAnonymousTelemetry =
newConfig.allowAnonymousTelemetry && (await ide.isTelemetryEnabled());

View File

@ -18,6 +18,7 @@ export const sharedConfigSchema = z
useChromiumForDocsCrawling: z.boolean(),
readResponseTTS: z.boolean(),
promptPath: z.string(),
useCurrentFileAsContext: z.boolean(),
// `ui` in `ContinueConfig`
showSessionTabs: z.boolean(),
@ -166,6 +167,10 @@ export function modifyAnyConfigWithSharedConfig<
if (sharedConfig.readResponseTTS !== undefined) {
configCopy.experimental.readResponseTTS = sharedConfig.readResponseTTS;
}
if (sharedConfig.useCurrentFileAsContext !== undefined) {
configCopy.experimental.useCurrentFileAsContext =
sharedConfig.useCurrentFileAsContext;
}
return configCopy;
}

View File

@ -743,8 +743,6 @@ declare global {
getLastModified(files: string[]): Promise<{ [path: string]: number }>;
getGitHubAuthToken(args: GetGhTokenArgs): Promise<string | undefined>;
// LSP
gotoDefinition(location: Location): Promise<RangeInFile[]>;

View File

@ -0,0 +1,233 @@
import { FQSN, SecretResult, SecretType } from "@continuedev/config-yaml";
import {
afterEach,
beforeEach,
describe,
expect,
Mock,
test,
vi,
} from "vitest";
import { IDE } from "../..";
import { ControlPlaneClient } from "../../control-plane/client";
import { LocalPlatformClient } from "./LocalPlatformClient";
vi.mock("../../util/paths", { spy: true });
describe("LocalPlatformClient", () => {
const testFQSN: FQSN = {
packageSlugs: [
{
ownerSlug: "test-owner-slug",
packageSlug: "test-package-slug",
},
],
secretName: "TEST_CONTINUE_SECRET_KEY",
};
const testFQSN2: FQSN = {
packageSlugs: [
{
ownerSlug: "test-owner-slug-2",
packageSlug: "test-package-slug-2",
},
],
secretName: "TEST_WORKSPACE_SECRET_KEY",
};
const testResolvedFQSN: SecretResult = {
found: true,
fqsn: testFQSN,
secretLocation: {
secretName: testFQSN.secretName,
secretType: SecretType.Organization,
orgSlug: "test-org-slug",
},
};
let testControlPlaneClient: ControlPlaneClient;
let testIde: IDE;
beforeEach(
/**dynamic import before each test for test isolation */
async () => {
const testFixtures = await import("../../test/fixtures");
testControlPlaneClient = testFixtures.testControlPlaneClient;
testIde = testFixtures.testIde;
},
);
let secretValue: string;
let envKeyValues: Record<string, unknown>;
let envKeyValuesString: string;
beforeEach(
/**generate unique env key value pairs for each test */
() => {
secretValue = Math.floor(Math.random() * 100) + "";
envKeyValues = {
TEST_CONTINUE_SECRET_KEY: secretValue,
TEST_WORKSPACE_SECRET_KEY: secretValue + "-workspace",
};
envKeyValuesString = Object.entries(envKeyValues)
.map(([key, value]) => `${key}=${value}`)
.join("\n");
},
);
afterEach(() => {
vi.resetAllMocks();
vi.restoreAllMocks();
vi.resetModules(); // clear dynamic imported module cache
});
test("should not be able to resolve FQSNs if they do not exist", async () => {
const localPlatformClient = new LocalPlatformClient(
null,
testControlPlaneClient,
testIde,
);
const resolvedFQSNs = await localPlatformClient.resolveFQSNs([testFQSN]);
expect(resolvedFQSNs.length).toBeGreaterThan(0);
expect(resolvedFQSNs[0]?.found).toBe(false);
});
test("should be able to resolve FQSNs if they exist", async () => {
testControlPlaneClient.resolveFQSNs = vi.fn(async () => [testResolvedFQSN]);
const localPlatformClient = new LocalPlatformClient(
null,
testControlPlaneClient,
testIde,
);
const resolvedFQSNs = await localPlatformClient.resolveFQSNs([testFQSN]);
expect(testControlPlaneClient.resolveFQSNs).toHaveBeenCalled();
expect(resolvedFQSNs).toEqual([testResolvedFQSN]);
expect(resolvedFQSNs[0]?.found).toBe(true);
});
describe("searches for secrets in local .env files", () => {
let getContinueDotEnv: Mock;
beforeEach(async () => {
const utilPaths = await import("../../util/paths");
getContinueDotEnv = vi.fn(() => envKeyValues);
utilPaths.getContinueDotEnv = getContinueDotEnv;
});
// Verifies that a secret present in ~/.continue/.env (mocked via
// getContinueDotEnv) is resolved by LocalPlatformClient.
test("should be able to get secrets from ~/.continue/.env files", async () => {
  const localPlatformClient = new LocalPlatformClient(
    null,
    testControlPlaneClient,
    testIde,
  );
  const resolvedFQSNs = await localPlatformClient.resolveFQSNs([testFQSN]);
  expect(getContinueDotEnv).toHaveBeenCalled();
  expect(resolvedFQSNs.length).toBe(1);
  expect(
    (resolvedFQSNs[0] as SecretResult & { value: unknown })?.value,
  ).toBe(secretValue);
  // Removed leftover debug console.log ("debug1 resolved fqsn") that was
  // committed with the test.
});
});
describe("should be able to get secrets from workspace .env files", () => {
// Verifies that secrets are read from BOTH <workspace>/.continue/.env and
// <workspace>/.env, with each secret coming from its respective file.
test("should get secrets from <workspace>/.continue/.env and <workspace>/.env", async () => {
// Pretend every .env path exists; delegate everything else to the real stub.
const originalIdeFileExists = testIde.fileExists;
testIde.fileExists = vi.fn(async (fileUri: string) =>
fileUri.includes(".env") ? true : originalIdeFileExists(fileUri),
);
const originalIdeReadFile = testIde.readFile;
// Random suffixes distinguish which file each resolved value came from.
const randomValueForContinueDirDotEnv =
"continue-dir-" + Math.floor(Math.random() * 100);
const randomValueForWorkspaceDotEnv =
"dotenv-" + Math.floor(Math.random() * 100);
testIde.readFile = vi.fn(async (fileUri: string) => {
// fileUri should contain .continue/.env and not .env
if (fileUri.match(/.*\.continue\/\.env.*/gi)?.length) {
return (
envKeyValuesString.split("\n")[0] + randomValueForContinueDirDotEnv
);
}
// fileUri should contain .env and not .continue/.env
else if (fileUri.match(/.*(?<!\.continue\/)\.env.*/gi)?.length) {
return (
envKeyValuesString.split("\n")[1] + randomValueForWorkspaceDotEnv
);
}
return originalIdeReadFile(fileUri);
});
const localPlatformClient = new LocalPlatformClient(
null,
testControlPlaneClient,
testIde,
);
const resolvedFQSNs = await localPlatformClient.resolveFQSNs([
testFQSN,
testFQSN2,
]);
// both the secrets should be present as they are retrieved from different files
expect(resolvedFQSNs.length).toBe(2);
const continueDirSecretValue = (
resolvedFQSNs[0] as SecretResult & { value: unknown }
)?.value;
const dotEnvSecretValue = (
resolvedFQSNs[1] as SecretResult & { value: unknown }
)?.value;
expect(continueDirSecretValue).toContain(secretValue);
expect(continueDirSecretValue).toContain(randomValueForContinueDirDotEnv);
expect(dotEnvSecretValue).toContain(secretValue + "-workspace");
expect(dotEnvSecretValue).toContain(randomValueForWorkspaceDotEnv);
});
// Verifies lookup precedence: when the same secret exists in both
// <workspace>/.continue/.env and <workspace>/.env, the .continue/.env value
// wins.
test("should first get secrets from <workspace>/.continue/.env and then <workspace>/.env", async () => {
// Pretend every .env path exists; delegate everything else to the real stub.
const originalIdeFileExists = testIde.fileExists;
testIde.fileExists = vi.fn(async (fileUri: string) =>
fileUri.includes(".env") ? true : originalIdeFileExists(fileUri),
);
// Random suffixes distinguish which file the resolved value came from.
// Note both branches below return the SAME key (line [0]) so the two files
// conflict on purpose.
const randomValueForContinueDirDotEnv =
"continue-dir-" + Math.floor(Math.random() * 100);
const randomValueForWorkspaceDotEnv =
"dotenv-" + Math.floor(Math.random() * 100);
const originalIdeReadFile = testIde.readFile;
testIde.readFile = vi.fn(async (fileUri: string) => {
// fileUri should contain .continue/.env and not .env
if (fileUri.match(/.*\.continue\/\.env.*/gi)?.length) {
return (
envKeyValuesString.split("\n")[0] + randomValueForContinueDirDotEnv
);
}
// fileUri should contain .env and not .continue/.env
else if (fileUri.match(/.*(?<!\.continue\/)\.env.*/gi)?.length) {
return (
envKeyValuesString.split("\n")[0] + randomValueForWorkspaceDotEnv
);
}
return originalIdeReadFile(fileUri);
});
const localPlatformClient = new LocalPlatformClient(
null,
testControlPlaneClient,
testIde,
);
const resolvedFQSNs = await localPlatformClient.resolveFQSNs([testFQSN]);
expect(resolvedFQSNs.length).toBe(1);
expect(
(resolvedFQSNs[0] as SecretResult & { value: unknown })?.value,
).toContain(secretValue);
// <workspace>/.continue/.env takes precedence: the resolved value carries
// the .continue/.env marker and NOT the <workspace>/.env marker.
expect(
(resolvedFQSNs[0] as SecretResult & { value: unknown })?.value,
).toContain(randomValueForContinueDirDotEnv);
expect(
(resolvedFQSNs[0] as SecretResult & { value: unknown })?.value,
).not.toContain(randomValueForWorkspaceDotEnv);
});
});
});

View File

@ -17,12 +17,16 @@ export class LocalPlatformClient implements PlatformClient {
private readonly ide: IDE,
) {}
/**
* searches for the first valid secret file in order of ~/.continue/.env, <workspace>/.continue/.env, <workspace>/.env
*/
private async findSecretInEnvFiles(
fqsn: FQSN,
): Promise<SecretResult | undefined> {
const secretValue =
this.findSecretInLocalEnvFile(fqsn) ??
(await this.findSecretInWorkspaceEnvFiles(fqsn));
(await this.findSecretInWorkspaceEnvFiles(fqsn, true)) ??
(await this.findSecretInWorkspaceEnvFiles(fqsn, false));
if (secretValue) {
return {
@ -52,12 +56,16 @@ export class LocalPlatformClient implements PlatformClient {
private async findSecretInWorkspaceEnvFiles(
fqsn: FQSN,
insideContinue: boolean,
): Promise<string | undefined> {
try {
const workspaceDirs = await this.ide.getWorkspaceDirs();
for (const folder of workspaceDirs) {
const envFilePath = joinPathsToUri(folder, ".env");
const envFilePath = joinPathsToUri(
folder,
insideContinue ? ".continue" : "",
".env",
);
try {
const fileExists = await this.ide.fileExists(envFilePath);
if (fileExists) {

View File

@ -32,15 +32,14 @@ import DocsContextProvider from "../../context/providers/DocsContextProvider";
import FileContextProvider from "../../context/providers/FileContextProvider";
import { contextProviderClassFromName } from "../../context/providers/index";
import { ControlPlaneClient } from "../../control-plane/client";
import FreeTrial from "../../llm/llms/FreeTrial";
import TransformersJsEmbeddingsProvider from "../../llm/llms/TransformersJsEmbeddingsProvider";
import { slashCommandFromPromptFileV1 } from "../../promptFiles/v1/slashCommandFromPromptFile";
import { getAllPromptFiles } from "../../promptFiles/v2/getPromptFiles";
import { allTools } from "../../tools";
import { GlobalContext } from "../../util/GlobalContext";
import { modifyAnyConfigWithSharedConfig } from "../sharedConfig";
import { getControlPlaneEnvSync } from "../../control-plane/env";
import { baseToolDefinitions } from "../../tools";
import { getCleanUriPath } from "../../util/uri";
import { getAllDotContinueDefinitionFiles } from "../loadLocalAssistants";
import { LocalPlatformClient } from "./LocalPlatformClient";
@ -176,24 +175,14 @@ async function configYamlToContinueConfig(options: {
uniqueId: string;
llmLogger: ILLMLogger;
workOsAccessToken: string | undefined;
allowFreeTrial?: boolean;
}): Promise<{ config: ContinueConfig; errors: ConfigValidationError[] }> {
let {
config,
ide,
ideSettings,
ideInfo,
uniqueId,
llmLogger,
allowFreeTrial,
} = options;
allowFreeTrial = allowFreeTrial ?? true;
let { config, ide, ideSettings, ideInfo, uniqueId, llmLogger } = options;
const localErrors: ConfigValidationError[] = [];
const continueConfig: ContinueConfig = {
slashCommands: [],
tools: [...allTools],
tools: [...baseToolDefinitions],
mcpServerStatuses: [],
contextProviders: [],
modelsByRole: {
@ -302,9 +291,14 @@ async function configYamlToContinueConfig(options: {
});
// Models
let warnAboutFreeTrial = false;
const defaultModelRoles: ModelRole[] = ["chat", "summarize", "apply", "edit"];
for (const model of config.models ?? []) {
model.roles = model.roles ?? defaultModelRoles; // Default to all 4 chat-esque roles if not specified
if (model.provider === "free-trial") {
warnAboutFreeTrial = true;
}
try {
const llms = await llmsFromModelConfig({
model,
@ -376,34 +370,12 @@ async function configYamlToContinueConfig(options: {
);
}
if (allowFreeTrial) {
// Obtain auth token (iff free trial being used)
const freeTrialModels = continueConfig.modelsByRole.chat.filter(
(model) => model.providerName === "free-trial",
);
if (freeTrialModels.length > 0) {
try {
const ghAuthToken = await ide.getGitHubAuthToken({});
for (const model of freeTrialModels) {
(model as FreeTrial).setupGhAuthToken(ghAuthToken);
}
} catch (e) {
localErrors.push({
fatal: false,
message: `Failed to obtain GitHub auth token for free trial:\n${e instanceof Error ? e.message : e}`,
});
// Remove free trial models
continueConfig.modelsByRole.chat =
continueConfig.modelsByRole.chat.filter(
(model) => model.providerName !== "free-trial",
);
}
}
} else {
// Remove free trial models
continueConfig.modelsByRole.chat = continueConfig.modelsByRole.chat.filter(
(model) => model.providerName !== "free-trial",
);
if (warnAboutFreeTrial) {
localErrors.push({
fatal: false,
message:
"Model provider 'free-trial' is no longer supported, will be ignored.",
});
}
// Context providers

View File

@ -4,7 +4,7 @@ import MCPConnection from "./MCPConnection";
describe("MCPConnection", () => {
beforeEach(() => {
jest.clearAllMocks();
jest.restoreAllMocks();
});
describe("constructor", () => {
@ -111,6 +111,7 @@ describe("MCPConnection", () => {
errors: [],
prompts: [],
resources: [],
resourceTemplates: [],
tools: [],
status: "not-connected",
});
@ -173,11 +174,11 @@ describe("MCPConnection", () => {
});
it("should handle custom connection timeout", async () => {
const conn = new MCPConnection({ ...options, timeout: 11 });
const conn = new MCPConnection({ ...options, timeout: 1500 });
const mockConnect = jest
.spyOn(Client.prototype, "connect")
.mockImplementation(
() => new Promise((resolve) => setTimeout(resolve, 10)),
() => new Promise((resolve) => setTimeout(resolve, 1000)),
);
const abortController = new AbortController();
@ -229,6 +230,38 @@ describe("MCPConnection", () => {
expect(conn.errors[0]).toContain('command "test-cmd" not found');
expect(mockConnect).toHaveBeenCalled();
});
// Skipped: spawns a real node subprocess, so it is environment-dependent.
// Exercises the stderr-capture path of the stdio transport: when the child
// process fails, the connection error should embed the captured stderr.
it.skip("should include stderr output in error message when stdio command fails", async () => {
// Clear any existing mocks to ensure we get real behavior
jest.restoreAllMocks();
// Use a command that will definitely fail and produce stderr output
const failingOptions = {
name: "failing-mcp",
id: "failing-id",
transport: {
type: "stdio" as const,
command: "node",
args: [
"-e",
"console.error('Custom error message from stderr'); process.exit(1);",
],
},
timeout: 5000, // Give enough time for the command to run and fail
};
const conn = new MCPConnection(failingOptions);
const abortController = new AbortController();
await conn.connectClient(false, abortController.signal);
// The error entry should carry both the failure header and the child's stderr.
expect(conn.status).toBe("error");
expect(conn.errors).toHaveLength(1);
expect(conn.errors[0]).toContain("Failed to connect");
expect(conn.errors[0]).toContain("Process output:");
expect(conn.errors[0]).toContain("STDERR:");
expect(conn.errors[0]).toContain("Custom error message from stderr");
});
});
describe.skip("actually connect to Filesystem MCP", () => {

View File

@ -3,6 +3,7 @@ import { Transport } from "@modelcontextprotocol/sdk/shared/transport.js";
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import { WebSocketClientTransport } from "@modelcontextprotocol/sdk/client/websocket.js";
import {
@ -10,6 +11,7 @@ import {
MCPOptions,
MCPPrompt,
MCPResource,
MCPResourceTemplate,
MCPServerStatus,
MCPTool,
} from "../..";
@ -36,8 +38,13 @@ class MCPConnection {
public prompts: MCPPrompt[] = [];
public tools: MCPTool[] = [];
public resources: MCPResource[] = [];
public resourceTemplates: MCPResourceTemplate[] = [];
private transport: Transport;
private connectionPromise: Promise<unknown> | null = null;
private stdioOutput: { stdout: string; stderr: string } = {
stdout: "",
stderr: "",
};
constructor(public options: MCPOptions) {
this.transport = this.constructTransport(options);
@ -67,6 +74,7 @@ class MCPConnection {
errors: this.errors,
prompts: this.prompts,
resources: this.resources,
resourceTemplates: this.resourceTemplates,
tools: this.tools,
status: this.status,
};
@ -90,7 +98,9 @@ class MCPConnection {
this.tools = [];
this.prompts = [];
this.resources = [];
this.resourceTemplates = [];
this.errors = [];
this.stdioOutput = { stdout: "", stderr: "" };
this.abortController.abort();
this.abortController = new AbortController();
@ -163,6 +173,23 @@ class MCPConnection {
}
this.errors.push(errorMessage);
}
// Resource templates
try {
const { resourceTemplates } =
await this.client.listResourceTemplates(
{},
{ signal: timeoutController.signal },
);
this.resourceTemplates = resourceTemplates;
} catch (e) {
let errorMessage = `Error loading resource templates for MCP Server ${this.options.name}`;
if (e instanceof Error) {
errorMessage += `: ${e.message}`;
}
this.errors.push(errorMessage);
}
}
// Tools <—> Tools
@ -215,6 +242,20 @@ class MCPConnection {
}
}
// Include stdio output if available for stdio transport
if (
this.options.transport.type === "stdio" &&
(this.stdioOutput.stdout || this.stdioOutput.stderr)
) {
errorMessage += "\n\nProcess output:";
if (this.stdioOutput.stdout) {
errorMessage += `\nSTDOUT:\n${this.stdioOutput.stdout}`;
}
if (this.stdioOutput.stderr) {
errorMessage += `\nSTDERR:\n${this.stdioOutput.stderr}`;
}
}
this.status = "error";
this.errors.push(errorMessage);
} finally {
@ -284,11 +325,20 @@ class MCPConnection {
options.transport.args || [],
);
return new StdioClientTransport({
const transport = new StdioClientTransport({
command,
args,
env,
stderr: "pipe",
});
// Capture stdio output for better error reporting
transport.stderr?.on("data", (data: Buffer) => {
this.stdioOutput.stderr += data.toString();
});
return transport;
case "websocket":
return new WebSocketClientTransport(new URL(options.transport.url));
case "sse":
@ -307,6 +357,13 @@ class MCPConnection {
},
requestInit: { headers: options.transport.requestOptions?.headers },
});
case "streamable-http":
return new StreamableHTTPClientTransport(
new URL(options.transport.url),
{
requestInit: { headers: options.transport.requestOptions?.headers },
},
);
default:
throw new Error(
`Unsupported transport type: ${(options.transport as any).type}`,

View File

@ -17,16 +17,15 @@ class DiffContextProvider extends BaseContextProvider {
query: string,
extras: ContextProviderExtras,
): Promise<ContextItem[]> {
const diff = await extras.ide.getDiff(
this.options?.includeUnstaged ?? true,
);
const includeUnstaged = this.options?.includeUnstaged ?? true;
const diffs = await extras.ide.getDiff(includeUnstaged); // TODO use diff cache (currently cache always includes unstaged)
return [
{
description: "The current git diff",
content:
diff.length === 0
diffs.length === 0
? "Git shows no current changes."
: `\`\`\`git diff\n${diff.join("\n")}\n\`\`\``,
: `\`\`\`git diff\n${diffs.join("\n")}\n\`\`\``,
name: "Git Diff",
},
];

View File

@ -20,6 +20,7 @@ class MCPContextProvider extends BaseContextProvider {
displayTitle: "MCP",
description: "Model Context Protocol",
type: "submenu",
renderInlineAs: "",
};
override get description(): ContextProviderDescription {
return {
@ -27,6 +28,7 @@ class MCPContextProvider extends BaseContextProvider {
displayTitle: this.options["serverName"]
? `${this.options["serverName"]} resources`
: "MCP",
renderInlineAs: "",
description: "Model Context Protocol",
type: "submenu",
};
@ -47,6 +49,18 @@ class MCPContextProvider extends BaseContextProvider {
super(options);
}
/**
 * Continue experimentally supports resource templates (https://modelcontextprotocol.io/docs/concepts/resources#resource-templates)
 * by allowing specifically just the "query" variable in the template, which we will update with the full input of the user in the input box.
 *
 * @param uri - the resource URI, possibly containing one or more `{query}` placeholders
 * @param query - the raw user input; URI-encoded before substitution
 * @returns the URI with every `{query}` placeholder substituted, or the URI unchanged if none present
 */
private insertInputToUriTemplate(uri: string, query: string): string {
  const TEMPLATE_VAR = "query";
  const placeholder = `{${TEMPLATE_VAR}}`;
  if (uri.includes(placeholder)) {
    // split/join substitutes EVERY occurrence; String.replace with a string
    // pattern would only substitute the first one.
    return uri.split(placeholder).join(encodeURIComponent(query));
  }
  return uri;
}
async getContextItems(
query: string,
extras: ContextProviderExtras,
@ -58,7 +72,9 @@ class MCPContextProvider extends BaseContextProvider {
throw new Error(`No MCP connection found for ${mcpId}`);
}
const { contents } = await connection.client.readResource({ uri });
const { contents } = await connection.client.readResource({
uri: this.insertInputToUriTemplate(uri, extras.fullInput),
});
return await Promise.all(
contents.map(async (resource) => {

View File

@ -13,14 +13,10 @@ export interface OnPremSessionInfo {
export type ControlPlaneSessionInfo = HubSessionInfo | OnPremSessionInfo;
export function isHubSession(
export function isOnPremSession(
sessionInfo: ControlPlaneSessionInfo | undefined,
): sessionInfo is HubSessionInfo {
return (
sessionInfo !== undefined &&
(sessionInfo.AUTH_TYPE === AuthType.WorkOsProd ||
sessionInfo.AUTH_TYPE === AuthType.WorkOsStaging)
);
): sessionInfo is OnPremSessionInfo {
return sessionInfo !== undefined && sessionInfo.AUTH_TYPE === AuthType.OnPrem;
}
export enum AuthType {

View File

@ -12,7 +12,7 @@ import fetch, { RequestInit, Response } from "node-fetch";
import { OrganizationDescription } from "../config/ProfileLifecycleManager.js";
import { IdeSettings, ModelDescription } from "../index.js";
import { ControlPlaneSessionInfo, isHubSession } from "./AuthTypes.js";
import { ControlPlaneSessionInfo, isOnPremSession } from "./AuthTypes.js";
import { getControlPlaneEnv } from "./env.js";
export interface ControlPlaneWorkspace {
@ -63,17 +63,16 @@ export class ControlPlaneClient {
async getAccessToken(): Promise<string | undefined> {
const sessionInfo = await this.sessionInfoPromise;
return isHubSession(sessionInfo) ? sessionInfo.accessToken : undefined;
return isOnPremSession(sessionInfo) ? undefined : sessionInfo?.accessToken;
}
private async request(path: string, init: RequestInit): Promise<Response> {
const sessionInfo = await this.sessionInfoPromise;
const hubSession = isHubSession(sessionInfo);
const accessToken = hubSession ? sessionInfo.accessToken : undefined;
const onPremSession = isOnPremSession(sessionInfo);
const accessToken = await this.getAccessToken();
// Bearer token not necessary for on-prem auth type
if (!accessToken && hubSession) {
if (!accessToken && !onPremSession) {
throw new Error("No access token");
}

View File

@ -88,7 +88,6 @@ const embeddingsProviderSchema = z.object({
"ollama",
"openai",
"cohere",
"free-trial",
"gemini",
"ovhcloud",
"nebius",

View File

@ -45,6 +45,7 @@ import {
} from ".";
import { ConfigYaml } from "@continuedev/config-yaml";
import { getDiffFn, GitDiffCache } from "./autocomplete/snippets/gitDiffCache";
import { isLocalAssistantFile } from "./config/loadLocalAssistants";
import {
setupBestConfig,
@ -723,6 +724,7 @@ export class Core {
};
return await callTool(tool, toolCall.function.arguments, {
config,
ide: this.ide,
llm: config.selectedModelByRole.chat,
fetch: (url, init) =>
@ -808,6 +810,8 @@ export class Core {
uris?: string[];
}>) {
if (data?.uris?.length) {
const diffCache = GitDiffCache.getInstance(getDiffFn(this.ide));
diffCache.invalidate();
walkDirCache.invalidate(); // safe approach for now - TODO - only invalidate on relevant changes
for (const uri of data.uris) {
const currentProfileUri =

47
core/index.d.ts vendored
View File

@ -5,7 +5,6 @@ import {
} from "@continuedev/config-yaml";
import Parser from "web-tree-sitter";
import { LLMConfigurationStatuses } from "./llm/constants";
import { GetGhTokenArgs } from "./protocol/ide";
declare global {
interface Window {
@ -93,6 +92,7 @@ export interface ILLM
extends Omit<LLMOptions, RequiredLLMOptions>,
Required<Pick<LLMOptions, RequiredLLMOptions>> {
get providerName(): string;
get underlyingProviderName(): string;
complete(
prompt: string,
@ -423,7 +423,7 @@ export interface PromptLog {
completion: string;
}
export type MessageModes = "chat" | "edit" | "agent";
export type MessageModes = "chat" | "agent";
export type ToolStatus =
| "generating"
@ -775,10 +775,6 @@ export interface IDE {
}
>;
getLastFileSaveTimestamp?(): number;
updateLastFileSaveTimestamp?(): void;
getPinnedFiles(): Promise<string[]>;
getSearchResults(query: string): Promise<string>;
@ -807,8 +803,6 @@ export interface IDE {
getFileStats(files: string[]): Promise<FileStatsMap>;
getGitHubAuthToken(args: GetGhTokenArgs): Promise<string | undefined>;
// Secret Storage
readSecrets(keys: string[]): Promise<Record<string, string>>;
@ -974,6 +968,7 @@ export interface ToolExtras {
toolCallId: string;
contextItems: ContextItem[];
}) => void;
config: ContinueConfig;
}
export interface Tool {
@ -1003,6 +998,12 @@ interface ToolChoice {
};
}
export interface ConfigDependentToolParams {
rules: RuleWithSource[];
}
export type GetTool = (params: ConfigDependentToolParams) => Tool;
export interface BaseCompletionOptions {
temperature?: number;
topP?: number;
@ -1035,6 +1036,7 @@ export interface ModelCapability {
export interface ModelDescription {
title: string;
provider: string;
underlyingProviderName: string;
model: string;
apiKey?: string;
@ -1133,7 +1135,17 @@ export interface SSEOptions {
requestOptions?: RequestOptions;
}
export type TransportOptions = StdioOptions | WebSocketOptions | SSEOptions;
export interface StreamableHTTPOptions {
type: "streamable-http";
url: string;
requestOptions?: RequestOptions;
}
export type TransportOptions =
| StdioOptions
| WebSocketOptions
| SSEOptions
| StreamableHTTPOptions;
export interface MCPOptions {
name: string;
@ -1162,6 +1174,7 @@ export interface MCPPrompt {
// Leaving here to ideate on
// export type ContinueConfigSource = "local-yaml" | "local-json" | "hub-assistant" | "hub"
// https://modelcontextprotocol.io/docs/concepts/resources#direct-resources
export interface MCPResource {
name: string;
uri: string;
@ -1169,6 +1182,14 @@ export interface MCPResource {
mimeType?: string;
}
// https://modelcontextprotocol.io/docs/concepts/resources#resource-templates
export interface MCPResourceTemplate {
uriTemplate: string;
name: string;
description?: string;
mimeType?: string;
}
export interface MCPTool {
name: string;
description?: string;
@ -1185,6 +1206,7 @@ export interface MCPServerStatus extends MCPOptions {
prompts: MCPPrompt[];
tools: MCPTool[];
resources: MCPResource[];
resourceTemplates: MCPResourceTemplate[];
}
export interface ContinueUIConfig {
@ -1322,6 +1344,11 @@ export interface ExperimentalConfig {
*/
useChromiumForDocsCrawling?: boolean;
modelContextProtocolServers?: ExperimentalMCPOptions[];
/**
* If enabled, will add the current file as context.
*/
useCurrentFileAsContext?: boolean;
}
export interface AnalyticsConfig {
@ -1333,6 +1360,7 @@ export interface AnalyticsConfig {
export interface JSONModelDescription {
title: string;
provider: string;
underlyingProviderName: string;
model: string;
apiKey?: string;
apiBase?: string;
@ -1543,4 +1571,5 @@ export interface RuleWithSource {
rule: string;
description?: string;
ruleFile?: string;
alwaysApply?: boolean;
}

View File

@ -249,7 +249,10 @@ export class CodebaseIndexer {
}
const { config } = await this.configHandler.loadConfig();
if (config?.disableIndexing) {
if (!config) {
return;
}
if (config.disableIndexing) {
yield {
progress,
desc: "Indexing is disabled in config.json",

View File

@ -101,23 +101,7 @@
// // const originalEmbeddingsProvider =
// // await docsService.getEmbeddingsProvider();
// // // Change embeddings provider
// // editConfigJson((config) => ({
// // ...config,
// // embeddingsProvider: {
// // provider: FreeTrial.providerName,
// // },
// // }));
// // await getReloadedConfig();
// // const { provider, isPreindexed} = await docsService.getEmbeddingsProvider();
// // // Verify reindexing
// // const [originalVector] = await originalEmbeddingsProvider.embed(["test"]);
// // const [newMockVector] = await provider.embed(["test"]);
// // expect(originalVector).not.toEqual(newMockVector);
// // TODO removed this test when free trial provider was removed
// // });
// test("Handles pulling down and adding pre-indexed docs", async () => {

View File

// Short path + branch + artifactId stays under the length limit, so the tag
// is rendered verbatim. NOTE(review): `tag` is presumably a fixture declared
// earlier in this file — confirm it matches /normal/path/to/repo::main::12345.
test("tagToString returns full tag string when under length limit", () => {
expect(tagToString(tag)).toBe("/normal/path/to/repo::main::12345");
});
test("tagToString truncates beginning of directory when path is too long", () => {
// Create a very long directory path that exceeds MAX_DIR_LENGTH (200)
test("tagToString truncates beginning of directory when path is too long and adds hash for uniqueness", () => {
const longPrefix = "/very/long/path/that/will/be/truncated/";
const importantSuffix = "/user/important-project/src/feature";
const longPath = longPrefix + "x".repeat(200) + importantSuffix;
@ -25,18 +24,14 @@ test("tagToString truncates beginning of directory when path is too long", () =>
const result = tagToString(tag);
// The result should keep the important suffix part
expect(result).toContain(importantSuffix);
// The result should NOT contain the beginning of the path
expect(result).not.toContain(longPrefix);
// The result should include the branch and artifactId
expect(result).toContain("::feature-branch::67890");
// The result should be within the MAX_TABLE_NAME_LENGTH limit (240)
expect(result.length).toBeLessThanOrEqual(240);
expect(result).toMatch(/^[a-f0-9]{8}_/);
expect(result).toContain(importantSuffix);
});
test("tagToString preserves branch and artifactId exactly, even when truncating", () => {
const longPath = "/a".repeat(300); // Much longer than MAX_DIR_LENGTH
const longPath = "/a".repeat(300);
const tag: IndexTag = {
directory: longPath,
branch: "release-v2.0",
@ -45,14 +40,56 @@ test("tagToString preserves branch and artifactId exactly, even when truncating"
const result = tagToString(tag);
// Should contain the exact branch and artifactId
expect(result).toContain("::release-v2.0::build-123");
// Should contain the end of the path
expect(result).toContain("/a/a/a");
// Should not contain the full original path (it should be truncated)
expect(result.length).toBeLessThan(
longPath.length + "::release-v2.0::build-123".length,
);
// The result should be within the MAX_TABLE_NAME_LENGTH limit
expect(result).toMatch(/^[a-f0-9]{8}_/);
expect(result.length).toBeLessThanOrEqual(240);
});
// Two long paths sharing the same truncation-surviving suffix must still map
// to different table names: the implementation prefixes an 8-hex-char hash of
// the full directory when the rendered tag would exceed the 240-char limit.
test("tagToString ensures uniqueness for different long paths that would otherwise collide", () => {
const basePath = "/very/long/base/path/that/exceeds/limits/";
const commonSuffix = "/same/ending/path";
// The distinguishing middle segments would be cut off by suffix truncation.
const tag1: IndexTag = {
directory: basePath + "different1" + "x".repeat(100) + commonSuffix,
branch: "main",
artifactId: "12345",
};
const tag2: IndexTag = {
directory: basePath + "different2" + "y".repeat(100) + commonSuffix,
branch: "main",
artifactId: "12345",
};
const fullString1 = `${tag1.directory}::${tag1.branch}::${tag1.artifactId}`;
const fullString2 = `${tag2.directory}::${tag2.branch}::${tag2.artifactId}`;
const result1 = tagToString(tag1);
const result2 = tagToString(tag2);
// Distinct inputs, distinct outputs — and both within the OS filename budget.
expect(result1).not.toBe(result2);
expect(result1.length).toBeLessThanOrEqual(240);
expect(result2.length).toBeLessThanOrEqual(240);
if (fullString1.length > 240) {
// Truncated form must carry the uniqueness hash prefix.
expect(result1).toMatch(/^[a-f0-9]{8}_/);
expect(result2).toMatch(/^[a-f0-9]{8}_/);
} else {
// Short enough: rendered verbatim, no hash.
expect(result1).toBe(fullString1);
expect(result2).toBe(fullString2);
}
});
// Determinism: tagToString must be a pure function of its input — calling it
// twice with the same tag yields the same table name.
test("tagToString produces consistent results for the same input", () => {
const sampleTag: IndexTag = {
directory:
"/some/very/long/path/that/exceeds/the/maximum/length/limit/for/directory/names/in/the/system",
branch: "develop",
artifactId: "test-123",
};
const firstRender = tagToString(sampleTag);
expect(tagToString(sampleTag)).toBe(firstRender);
});

View File

@ -1,3 +1,4 @@
import crypto from "crypto";
import { IndexTag } from "..";
// Maximum length for table names to stay under OS filename limits
@ -18,8 +19,8 @@ const MAX_DIR_LENGTH = 200;
*
* To handle long paths:
* 1. First tries the full string - most backwards compatible
* 2. If too long, truncates directory from the beginning to maintain uniqueness
* (since final parts of paths are more unique than prefixes)
* 2. If too long, truncates directory from the beginning and adds a hash prefix
* to ensure uniqueness while preserving the more readable end parts
* 3. Finally ensures entire string stays under MAX_TABLE_NAME_LENGTH for OS compatibility
*
* @param tag The tag containing directory, branch, and artifactId
@ -32,14 +33,22 @@ export function tagToString(tag: IndexTag): string {
return result;
}
// Create a hash of the full directory path to ensure uniqueness
const dirHash = crypto
.createHash("md5")
.update(tag.directory)
.digest("hex")
.slice(0, 8);
// Calculate how much space we have for the directory after accounting for hash, separators, branch, and artifactId
const nonDirLength = `${dirHash}_::${tag.branch}::${tag.artifactId}`.length;
const maxDirForTruncated = MAX_TABLE_NAME_LENGTH - nonDirLength;
// Truncate from the beginning of directory path to preserve the more unique end parts
const dir =
tag.directory.length > MAX_DIR_LENGTH
? tag.directory.slice(tag.directory.length - MAX_DIR_LENGTH)
const truncatedDir =
tag.directory.length > maxDirForTruncated
? tag.directory.slice(tag.directory.length - maxDirForTruncated)
: tag.directory;
return `${dir}::${tag.branch}::${tag.artifactId}`.slice(
0,
MAX_TABLE_NAME_LENGTH,
);
return `${dirHash}_${truncatedDir}::${tag.branch}::${tag.artifactId}`;
}

View File

@ -29,4 +29,7 @@ export default {
globalSetup: "<rootDir>/test/jest.global-setup.ts",
setupFilesAfterEnv: ["<rootDir>/test/jest.setup-after-env.js"],
maxWorkers: 1, // equivalent to CLI --runInBand
modulePathIgnorePatterns: [
"<rootDir>/config/yaml/LocalPlatformClient.test.ts",
],
};

View File

@ -66,7 +66,6 @@ const PROVIDER_SUPPORTS_IMAGES: string[] = [
"openai",
"ollama",
"gemini",
"free-trial",
"msty",
"anthropic",
"bedrock",
@ -148,7 +147,6 @@ const PARALLEL_PROVIDERS: string[] = [
"huggingface-tgi",
"mistral",
"moonshot",
"free-trial",
"replicate",
"together",
"novita",

View File

@ -46,6 +46,39 @@ describe.skip("pruneLinesFromTop", () => {
const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
expect(pruned.split("\n").length).toBeLessThan(prompt.split("\n").length);
});
// Fits within budget: input is returned untouched.
it("should return the original prompt if it's within max tokens", () => {
const prompt = "Line 1\nLine 2";
const pruned = pruneLinesFromTop(prompt, 10, "gpt-4");
expect(pruned).toEqual(prompt);
});
// Zero budget: everything is pruned.
it("should return an empty string if maxTokens is 0", () => {
const prompt = "Line 1\nLine 2\nLine 3\nLine 4";
const pruned = pruneLinesFromTop(prompt, 0, "gpt-4");
expect(pruned).toEqual("");
});
// Degenerate input: empty string stays empty.
it("should handle an empty prompt string", () => {
const prompt = "";
const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
expect(pruned).toEqual("");
});
// A single line over budget cannot be partially kept — result is empty.
it("should handle a prompt with a single line that exceeds maxTokens", () => {
const prompt =
"This is a single long line that will exceed the token limit";
const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
expect(pruned).toEqual("");
});
// Lines are dropped from the TOP until the remainder fits.
// NOTE(review): expected value assumes each "Lx" line counts as ~2 tokens
// under the gpt-4 tokenizer — confirm against countTokens.
it("should correctly prune when all lines together exceed maxTokens but individual lines do not", () => {
const prompt = "L1\nL2\nL3\nL4";
const pruned = pruneLinesFromTop(prompt, 5, "gpt-4");
expect(pruned).toEqual("L3\nL4");
});
});
describe.skip("pruneLinesFromBottom", () => {
@ -54,6 +87,39 @@ describe.skip("pruneLinesFromBottom", () => {
const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
expect(pruned.split("\n").length).toBeLessThan(prompt.split("\n").length);
});
// Fits within budget: input is returned untouched.
it("should return the original prompt if it's within max tokens", () => {
const prompt = "Line 1\nLine 2";
const pruned = pruneLinesFromBottom(prompt, 10, "gpt-4");
expect(pruned).toEqual(prompt);
});
// Zero budget: everything is pruned.
it("should return an empty string if maxTokens is 0", () => {
const prompt = "Line 1\nLine 2\nLine 3\nLine 4";
const pruned = pruneLinesFromBottom(prompt, 0, "gpt-4");
expect(pruned).toEqual("");
});
// Degenerate input: empty string stays empty.
it("should handle an empty prompt string", () => {
const prompt = "";
const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
expect(pruned).toEqual("");
});
// A single line over budget cannot be partially kept — result is empty.
it("should handle a prompt with a single line that exceeds maxTokens", () => {
const prompt =
"This is a single long line that will exceed the token limit";
const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
expect(pruned).toEqual("");
});
// Lines are dropped from the BOTTOM until the remainder fits — the mirror of
// the pruneLinesFromTop case above.
it("should correctly prune when all lines together exceed maxTokens but individual lines do not", () => {
const prompt = "L1\nL2\nL3\nL4";
const pruned = pruneLinesFromBottom(prompt, 5, "gpt-4");
expect(pruned).toEqual("L1\nL2");
});
});
describe.skip("pruneRawPromptFromTop", () => {

View File

@ -215,13 +215,28 @@ function pruneLinesFromTop(
maxTokens: number,
modelName: string,
): string {
let totalTokens = countTokens(prompt, modelName);
const lines = prompt.split("\n");
while (totalTokens > maxTokens && lines.length > 0) {
totalTokens -= countTokens(lines.shift()!, modelName);
// Preprocess tokens for all lines and cache them.
const lineTokens = lines.map((line) => countTokens(line, modelName));
let totalTokens = lineTokens.reduce((sum, tokens) => sum + tokens, 0);
let start = 0;
let currentLines = lines.length;
// Calculate initial token count including newlines
totalTokens += Math.max(0, currentLines - 1); // Add tokens for joining newlines
// Using indexes instead of array modifications.
// Remove lines from the top until the token count is within the limit.
while (totalTokens > maxTokens && start < currentLines) {
totalTokens -= lineTokens[start];
// Decrement token count for the removed line and its preceding/joining newline (if not the last line)
if (currentLines - start > 1) {
totalTokens--;
}
start++;
}
return lines.join("\n");
return lines.slice(start).join("\n");
}
function pruneLinesFromBottom(
@ -229,13 +244,26 @@ function pruneLinesFromBottom(
maxTokens: number,
modelName: string,
): string {
let totalTokens = countTokens(prompt, modelName);
const lines = prompt.split("\n");
while (totalTokens > maxTokens && lines.length > 0) {
totalTokens -= countTokens(lines.pop()!, modelName);
const lineTokens = lines.map((line) => countTokens(line, modelName));
let totalTokens = lineTokens.reduce((sum, tokens) => sum + tokens, 0);
let end = lines.length;
// Calculate initial token count including newlines
totalTokens += Math.max(0, end - 1); // Add tokens for joining newlines
// Reverse traversal to avoid array modification
// Remove lines from the bottom until the token count is within the limit.
while (totalTokens > maxTokens && end > 0) {
end--;
totalTokens -= lineTokens[end];
// Decrement token count for the removed line and its following/joining newline (if not the first line)
if (end > 0) {
totalTokens--;
}
}
return lines.join("\n");
return lines.slice(0, end).join("\n");
}
function pruneStringFromBottom(
@ -452,5 +480,5 @@ export {
pruneLinesFromTop,
pruneRawPromptFromTop,
pruneStringFromBottom,
pruneStringFromTop,
pruneStringFromTop
};

View File

@ -88,6 +88,14 @@ export abstract class BaseLLM implements ILLM {
return (this.constructor as typeof BaseLLM).providerName;
}
/**
* This exists because for the continue-proxy, sometimes we want to get the value of the underlying provider that is used on the server
* For example, the underlying provider should always be sent with dev data
*/
get underlyingProviderName(): string {
return this.providerName;
}
supportsFim(): boolean {
return false;
}
@ -332,7 +340,7 @@ export abstract class BaseLLM implements ILLM {
name: "tokensGenerated",
data: {
model: model,
provider: this.providerName,
provider: this.underlyingProviderName,
promptTokens: promptTokens,
generatedTokens: generatedTokens,
},

View File

@ -1,270 +0,0 @@
import { streamResponse } from "@continuedev/fetch";
import { TRIAL_FIM_MODEL } from "../../config/onboarding.js";
import { getHeaders } from "../../continueServer/stubs/headers.js";
import { TRIAL_PROXY_URL } from "../../control-plane/client.js";
import {
ChatMessage,
Chunk,
CompletionOptions,
LLMOptions,
} from "../../index.js";
import { BaseLLM } from "../index.js";
/**
 * LLM provider backing Continue's free trial. All requests are forwarded to
 * the trial proxy server (TRIAL_PROXY_URL) and authorized with a GitHub
 * auth token. Supports streaming completion/chat, FIM (Codestral only),
 * embeddings, and reranking.
 */
class FreeTrial extends BaseLLM {
  static providerName = "free-trial";
  static defaultOptions: Partial<LLMOptions> | undefined = {
    maxEmbeddingBatchSize: 128,
    model: "voyage-code-2",
  };
  // GitHub auth token used to authorize trial requests; populated later via
  // setupGhAuthToken, so it may be undefined when a request is attempted.
  private ghAuthToken: string | undefined = undefined;
  constructor(options: LLMOptions) {
    super(options);
    // Identify embeddings by provider class + model so cached embeddings are
    // invalidated when the model changes.
    this.embeddingId = `${this.constructor.name}::${this.model}`;
  }
  /** Stores the GitHub auth token used by _getHeaders. */
  setupGhAuthToken(ghAuthToken: string | undefined) {
    this.ghAuthToken = ghAuthToken;
  }
  /**
   * Builds request headers for the trial proxy.
   * @throws Error if no GitHub auth token has been set.
   */
  private async _getHeaders() {
    if (!this.ghAuthToken) {
      throw new Error(
        "Please sign in with GitHub in order to use the free trial. If you'd like to use Continue without signing in, you can set up your own local model or API key.",
      );
    }
    return {
      "Content-Type": "application/json",
      Authorization: `Bearer ${this.ghAuthToken}`,
      ...(await getHeaders()),
    };
  }
  // Intentionally a no-op: token telemetry was disabled (see comments below)
  // but call sites were kept so it can be re-enabled easily.
  private async _countTokens(prompt: string, model: string, isPrompt: boolean) {
    // Removed to reduce PostHog bill
    // if (!Telemetry.client) {
    //   throw new Error(
    //     "In order to use the free trial, telemetry must be enabled so that we can monitor abuse. To enable telemetry, set \"allowAnonymousTelemetry\": true in config.json and make sure the box is checked in IDE settings. If you use your own model (local or API key), telemetry will never be required.",
    //   );
    // }
    // const event = isPrompt
    //   ? "free_trial_prompt_tokens"
    //   : "free_trial_completion_tokens";
    // Telemetry.capture(event, {
    //   tokens: this.countTokens(prompt),
    //   model,
    // });
  }
  /**
   * Maps Continue's CompletionOptions to the proxy's snake_case body shape.
   * Stop sequences are truncated to 2 except for the trial FIM model.
   */
  private _convertArgs(options: CompletionOptions): any {
    return {
      model: options.model,
      frequency_penalty: options.frequencyPenalty,
      presence_penalty: options.presencePenalty,
      max_tokens: options.maxTokens,
      stop:
        options.model === TRIAL_FIM_MODEL
          ? options.stop
          : options.stop?.slice(0, 2),
      temperature: options.temperature,
      top_p: options.topP,
    };
  }
  /** Streams a plain-text completion from the proxy's /stream_complete endpoint. */
  protected async *_streamComplete(
    prompt: string,
    signal: AbortSignal,
    options: CompletionOptions,
  ): AsyncGenerator<string> {
    const args = this._convertArgs(this.collectArgs(options));
    await this._countTokens(prompt, args.model, true);
    const response = await this.fetch(`${TRIAL_PROXY_URL}/stream_complete`, {
      method: "POST",
      headers: await this._getHeaders(),
      body: JSON.stringify({
        prompt,
        ...args,
      }),
      signal,
    });
    let completion = "";
    for await (const value of streamResponse(response)) {
      yield value;
      completion += value;
    }
    // Fire-and-forget: the completion-token count is not awaited here
    // (unlike _streamChat/_streamFim, which await it).
    void this._countTokens(completion, args.model, false);
  }
  /**
   * Converts a Continue ChatMessage to the proxy's OpenAI-style wire format:
   * tool messages get tool_call_id, image parts become image_url parts.
   * NOTE: does not reference `this`, so it is safe to pass unbound to map().
   */
  protected _convertMessage(message: ChatMessage) {
    if (message.role === "tool") {
      return {
        role: "tool",
        content: message.content,
        tool_call_id: message.toolCallId,
      };
    }
    if (typeof message.content === "string") {
      return message;
    }
    const parts = message.content.map((part) => {
      if (part.type === "imageUrl") {
        return {
          type: "image_url",
          image_url: {
            url: part.imageUrl.url,
            detail: "low",
          },
        };
      }
      return {
        type: "text",
        text: part.text,
      };
    });
    return {
      ...message,
      content: parts,
    };
  }
  /** Streams assistant chat chunks from the proxy's /stream_chat endpoint. */
  protected async *_streamChat(
    messages: ChatMessage[],
    signal: AbortSignal,
    options: CompletionOptions,
  ): AsyncGenerator<ChatMessage> {
    const args = this._convertArgs(this.collectArgs(options));
    await this._countTokens(
      messages.map((m) => m.content).join("\n"),
      args.model,
      true,
    );
    const response = await this.fetch(`${TRIAL_PROXY_URL}/stream_chat`, {
      method: "POST",
      headers: await this._getHeaders(),
      body: JSON.stringify({
        messages: messages.map(this._convertMessage),
        ...args,
      }),
      signal,
    });
    let completion = "";
    for await (const chunk of streamResponse(response)) {
      yield {
        role: "assistant",
        content: chunk,
      };
      completion += chunk;
    }
    await this._countTokens(completion, args.model, false);
  }
  // FIM is only available for the Codestral trial model.
  supportsFim(): boolean {
    return this.model === "codestral-latest";
  }
  /**
   * Streams a fill-in-the-middle completion. Translates the proxy's HTTP 429
   * into a user-facing trial-limit message; other errors are re-thrown.
   */
  async *_streamFim(
    prefix: string,
    suffix: string,
    signal: AbortSignal,
    options: CompletionOptions,
  ): AsyncGenerator<string> {
    const args = this._convertArgs(this.collectArgs(options));
    try {
      const resp = await this.fetch(`${TRIAL_PROXY_URL}/stream_fim`, {
        method: "POST",
        headers: await this._getHeaders(),
        body: JSON.stringify({
          prefix,
          suffix,
          ...args,
        }),
        signal,
      });
      let completion = "";
      for await (const value of streamResponse(resp)) {
        yield value;
        completion += value;
      }
      await this._countTokens(completion, args.model, false);
    } catch (e: any) {
      if (e.message.startsWith("HTTP 429")) {
        throw new Error(
          "You have reached the 2000 request limit for the autocomplete free trial. To continue using autocomplete, please set up a local model or your own Codestral API key.",
        );
      }
      throw e;
    }
  }
  // Static list of models offered on the free trial (no network call).
  async listModels(): Promise<string[]> {
    return [
      "codestral-latest",
      "claude-3-5-sonnet-latest",
      "llama3.1-405b",
      "llama3.1-70b",
      "gpt-4o",
      "gpt-3.5-turbo",
      "claude-3-5-haiku-latest",
      "gemini-1.5-pro-latest",
    ];
  }
  /**
   * Embeds a batch of chunks via the proxy's /embeddings endpoint.
   * NOTE(review): unlike the other methods, this sends only getHeaders()
   * without the GitHub Authorization header — confirm that is intentional.
   * @throws Error on any non-200 response.
   */
  protected async _embed(chunks: string[]): Promise<number[][]> {
    const resp = await this.fetch(new URL("embeddings", TRIAL_PROXY_URL), {
      method: "POST",
      body: JSON.stringify({
        input: chunks,
        model: this.model,
      }),
      headers: {
        "Content-Type": "application/json",
        ...(await getHeaders()),
      },
    });
    if (resp.status !== 200) {
      throw new Error(`Failed to embed: ${resp.status} ${await resp.text()}`);
    }
    const data = (await resp.json()) as any;
    return data.embeddings;
  }
  /**
   * Reranks chunks against a query via the proxy's /rerank endpoint.
   * Results are sorted back into the original chunk order before the
   * relevance scores are returned.
   */
  async rerank(query: string, chunks: Chunk[]): Promise<number[]> {
    if (chunks.length === 0) {
      return [];
    }
    const resp = await this.fetch(new URL("rerank", TRIAL_PROXY_URL), {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        ...(await getHeaders()),
      },
      body: JSON.stringify({
        query,
        documents: chunks.map((chunk) => chunk.content),
      }),
    });
    if (!resp.ok) {
      throw new Error(await resp.text());
    }
    const data = (await resp.json()) as any;
    const results = data.sort((a: any, b: any) => a.index - b.index);
    return results.map((result: any) => result.relevance_score);
  }
}
export default FreeTrial;

View File

@ -35,7 +35,7 @@ class LlamaCpp extends BaseLLM {
...this.requestOptions?.headers,
};
const resp = await this.fetch(new URL("completions", this.apiBase), {
const resp = await this.fetch(new URL("completion", this.apiBase), {
method: "POST",
headers,
body: JSON.stringify({

View File

@ -12,6 +12,10 @@ class Vllm extends OpenAI {
}
}
supportsFim(): boolean {
return false;
}
private _setupCompletionOptions() {
this.fetch(this._getEndpoint("models"), {
method: "GET",

View File

@ -23,7 +23,6 @@ import Deepseek from "./Deepseek";
import Docker from "./Docker";
import Fireworks from "./Fireworks";
import Flowise from "./Flowise";
import FreeTrial from "./FreeTrial";
import FunctionNetwork from "./FunctionNetwork";
import Gemini from "./Gemini";
import Groq from "./Groq";
@ -67,7 +66,6 @@ import xAI from "./xAI";
export const LLMClasses = [
Anthropic,
Cohere,
FreeTrial,
FunctionNetwork,
Gemini,
Llamafile,

View File

@ -1,6 +1,7 @@
import {
ContinueProperties,
decodeSecretLocation,
parseProxyModelName,
SecretType,
} from "@continuedev/config-yaml";
@ -47,6 +48,11 @@ class ContinueProxy extends OpenAI {
useLegacyCompletionsEndpoint: false,
};
get underlyingProviderName(): string {
const { provider } = parseProxyModelName(this.model);
return provider;
}
protected extraBodyProperties(): Record<string, any> {
const continueProperties: ContinueProperties = {
apiKeyLocation: this.apiKeyLocation,
@ -83,6 +89,10 @@ class ContinueProxy extends OpenAI {
}
supportsFim(): boolean {
const { provider } = parseProxyModelName(this.model);
if (provider === "vllm") {
return false;
}
return true;
}

View File

@ -0,0 +1,55 @@
import { jest } from "@jest/globals";
import Anthropic from "../Anthropic.js";
import Deepseek from "../Deepseek.js";
import FunctionNetwork from "../FunctionNetwork.js";
import Mistral from "../Mistral.js";
import OpenAI from "../OpenAI.js";
import ContinueProxy from "../stubs/ContinueProxy.js";
import Vllm from "../Vllm.js";
// Mock the parseProxyModelName function
// NOTE(review): mockParseProxyModelName is never given an implementation, so
// if this mock factory actually applied, ContinueProxy.supportsFim would
// destructure undefined and throw. Under ESM (`import { jest } from
// "@jest/globals"`), jest.mock with static imports may be a no-op, letting
// the real parser run — confirm against the project's jest config, and
// consider jest.unstable_mockModule or dropping the mock entirely.
const mockParseProxyModelName = jest.fn();
jest.mock("@continuedev/config-yaml", () => ({
  parseProxyModelName: mockParseProxyModelName,
  decodeSecretLocation: jest.fn(),
  SecretType: { NotFound: "not-found" },
}));
// Test cases: [LLM class, model, expected supportsFim result, description]
// ContinueProxy model strings follow the owner/package/provider/model shape.
const testCases: [any, string, boolean, string][] = [
  [ContinueProxy, "owner/package/vllm/some-model", false, "vllm provider"],
  [ContinueProxy, "owner/package/openai/gpt-4", true, "openai provider"],
  [
    ContinueProxy,
    "owner/package/anthropic/claude-3",
    true,
    "anthropic provider",
  ],
  [ContinueProxy, "owner/package/cohere/command-r", true, "cohere provider"],
  [
    ContinueProxy,
    "owner/package/unknown-provider/some-model",
    true,
    "unknown provider",
  ],
  [ContinueProxy, "owner/package/groq/llama-model", true, "groq provider"],
  [Vllm, "any-model", false, "Vllm"],
  [Anthropic, "claude-3-5-sonnet-latest", false, "Anthropic"],
  [FunctionNetwork, "any-model", false, "FunctionNetwork"],
  [OpenAI, "codestral", false, "OpenAI"],
  [Mistral, "codestral", true, "Mistral"],
  [Deepseek, "deepseek-chat", true, "Deepseek"],
];
// Parameterized test: construct each provider with the given model and
// assert its supportsFim() result.
testCases.forEach(([LLMClass, model, expectedResult, description]) => {
  test(`supportsFim returns ${expectedResult} for ${description}`, () => {
    const llm = new LLMClass({
      model,
      apiKey: "test-key",
    });
    const result = llm.supportsFim();
    expect(result).toBe(expectedResult);
  });
});

View File

@ -1,5 +1,6 @@
/* eslint-disable max-lines-per-function */
import { ContextItemWithId, RuleWithSource, UserChatMessage } from "../..";
import { getSystemMessageWithRules } from "./getSystemMessageWithRules";
import { getSystemMessageWithRules, shouldApplyRule } from "./getSystemMessageWithRules";
describe("getSystemMessageWithRules", () => {
const baseSystemMessage = "Base system message";
@ -402,4 +403,402 @@ describe("getSystemMessageWithRules", () => {
const expected = `${baseSystemMessage}\n\n${jsRule.rule}\n\n${tsRule.rule}\n\n${pythonRule.rule}\n\n${generalRule.rule}`;
expect(result).toBe(expected);
});
// Tests for alwaysApply property
describe("alwaysApply property", () => {
const alwaysApplyTrueRule: RuleWithSource = {
name: "Always Apply True Rule",
rule: "This rule should always be applied",
globs: "**/*.nonexistent",
alwaysApply: true,
source: "rules-block",
};
const alwaysApplyFalseRule: RuleWithSource = {
name: "Always Apply False Rule",
rule: "This rule should never be applied",
alwaysApply: false,
source: "rules-block",
};
const alwaysApplyFalseWithMatchingGlobs: RuleWithSource = {
name: "Always Apply False with Matching Globs",
rule: "This rule should never be applied even with matching globs",
globs: "**/*.ts?(x)",
alwaysApply: false,
source: "rules-block",
};
it("should always include rules with alwaysApply: true regardless of globs or file paths", () => {
const userMessage: UserChatMessage = {
role: "user",
content: "```js main.js\nconsole.log('hello');\n```",
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage,
rules: [alwaysApplyTrueRule, tsRule],
contextItems: emptyContextItems,
});
// Should include the alwaysApply:true rule even though globs don't match
const expected = `${baseSystemMessage}\n\n${alwaysApplyTrueRule.rule}`;
expect(result).toBe(expected);
});
it("should include rules with alwaysApply: false when globs match", () => {
const userMessage: UserChatMessage = {
role: "user",
content: "```tsx Component.tsx\nexport const Component = () => <div>Hello</div>;\n```",
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage,
rules: [alwaysApplyFalseWithMatchingGlobs, tsRule, generalRule],
contextItems: emptyContextItems,
});
// Should include alwaysApply:false rule because globs match
const expected = `${baseSystemMessage}\n\n${alwaysApplyFalseWithMatchingGlobs.rule}\n\n${tsRule.rule}\n\n${generalRule.rule}`;
expect(result).toBe(expected);
});
it("should include rules with alwaysApply: false when globs match", () => {
const userMessage: UserChatMessage = {
role: "user",
content:
"```ts Component.tsx\nexport const Component = () => <div>Hello</div>;\n```",
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage,
rules: [alwaysApplyFalseWithMatchingGlobs, tsRule, generalRule],
contextItems: emptyContextItems,
});
// Should include alwaysApply:false rule because globs match
const expected = `${baseSystemMessage}\n\n${alwaysApplyFalseWithMatchingGlobs.rule}\n\n${tsRule.rule}\n\n${generalRule.rule}`;
expect(result).toBe(expected);
});
it("should NOT include rules with alwaysApply: false when globs don't match", () => {
const userMessage: UserChatMessage = {
role: "user",
content: "```py script.py\nprint('hello')\n```",
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage,
rules: [alwaysApplyFalseWithMatchingGlobs, generalRule],
contextItems: emptyContextItems,
});
// Should only include general rule (alwaysApply:false rule doesn't match .py files)
const expected = `${baseSystemMessage}\n\n${generalRule.rule}`;
expect(result).toBe(expected);
});
it("should NOT include rules with alwaysApply: false when no globs are specified", () => {
const userMessage: UserChatMessage = {
role: "user",
content: "```js main.js\nconsole.log('hello');\n```",
};
const alwaysApplyFalseNoGlobs: RuleWithSource = {
name: "Always Apply False No Globs",
rule: "This rule has alwaysApply false and no globs",
alwaysApply: false,
source: "rules-block",
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage,
rules: [alwaysApplyFalseNoGlobs, jsRule, generalRule],
contextItems: emptyContextItems,
});
// Should NOT include alwaysApply:false rule when no globs specified
const expected = `${baseSystemMessage}\n\n${jsRule.rule}\n\n${generalRule.rule}`;
expect(result).toBe(expected);
});
it("should include rules with alwaysApply: true even when no files are present", () => {
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage: undefined,
rules: [alwaysApplyTrueRule, tsRule, pythonRule],
contextItems: emptyContextItems,
});
// Should only include the alwaysApply:true rule
const expected = `${baseSystemMessage}\n\n${alwaysApplyTrueRule.rule}`;
expect(result).toBe(expected);
});
it("should handle mixed alwaysApply values correctly", () => {
const userMessage: UserChatMessage = {
role: "user",
content:
"```ts Component.tsx\nexport const Component = () => <div>Hello</div>;\n```",
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage,
rules: [
alwaysApplyTrueRule,
alwaysApplyFalseRule,
alwaysApplyFalseWithMatchingGlobs,
tsRule,
generalRule,
],
contextItems: emptyContextItems,
});
// Should include:
// - alwaysApplyTrueRule (always applies)
// - alwaysApplyFalseWithMatchingGlobs (has globs that match .tsx)
// - tsRule (globs match .tsx)
// - generalRule (no globs, so applies to all)
// Should NOT include:
// - alwaysApplyFalseRule (alwaysApply: false and no globs)
const expected = `${baseSystemMessage}\n\n${alwaysApplyTrueRule.rule}\n\n${alwaysApplyFalseWithMatchingGlobs.rule}\n\n${tsRule.rule}\n\n${generalRule.rule}`;
expect(result).toBe(expected);
});
it("should use glob matching when alwaysApply is false", () => {
// This tests that rules with alwaysApply: false follow glob matching
const ruleWithAlwaysApplyFalse: RuleWithSource = {
name: "Rule With Always Apply False",
rule: "This rule follows glob matching behavior",
globs: "**/*.ts?(x)",
alwaysApply: false,
source: "rules-block",
};
const userMessage: UserChatMessage = {
role: "user",
content:
"```ts Component.tsx\nexport const Component = () => <div>Hello</div>;\n```",
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage,
rules: [ruleWithAlwaysApplyFalse],
contextItems: emptyContextItems,
});
// Should include the rule because it matches the file path
const expected = `${baseSystemMessage}\n\n${ruleWithAlwaysApplyFalse.rule}`;
expect(result).toBe(expected);
});
it("should include rules with globs when context file paths match (alwaysApply false)", () => {
const ruleWithGlobsOnly: RuleWithSource = {
name: "TypeScript Only Rule",
rule: "This rule should apply to TypeScript files only",
globs: "**/*.ts",
alwaysApply: false,
source: "rules-block",
};
const tsContextItem: ContextItemWithId = {
content: "TypeScript file content",
name: "utils.ts",
description: "A TypeScript utility file",
id: { providerTitle: "file", itemId: "src/utils.ts" },
uri: { type: "file", value: "src/utils.ts" },
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage: undefined, // No message, only context
rules: [ruleWithGlobsOnly, pythonRule], // Include a non-matching rule
contextItems: [tsContextItem],
});
// Should include the TypeScript rule but not the Python rule
const expected = `${baseSystemMessage}\n\n${ruleWithGlobsOnly.rule}`;
expect(result).toBe(expected);
});
it("should NOT include rules with globs when context file paths don't match (alwaysApply false)", () => {
const ruleWithGlobsOnly: RuleWithSource = {
name: "TypeScript Only Rule",
rule: "This rule should apply to TypeScript files only",
globs: "**/*.ts",
alwaysApply: false,
source: "rules-block",
};
const pyContextItem: ContextItemWithId = {
content: "Python file content",
name: "utils.py",
description: "A Python utility file",
id: { providerTitle: "file", itemId: "src/utils.py" },
uri: { type: "file", value: "src/utils.py" },
};
const result = getSystemMessageWithRules({
baseSystemMessage,
userMessage: undefined, // No message, only context
rules: [ruleWithGlobsOnly],
contextItems: [pyContextItem], // Python file doesn't match *.ts pattern
});
// Should NOT include the rule because context doesn't match the glob
expect(result).toBe(baseSystemMessage);
});
});
});
describe("shouldApplyRule", () => {
const ruleWithGlobs: RuleWithSource = {
name: "Rule with Globs",
rule: "Apply to TypeScript files",
globs: "**/*.ts?(x)",
alwaysApply: false,
source: "rules-block",
};
const ruleWithoutGlobs: RuleWithSource = {
name: "Rule without Globs",
rule: "Apply to all files",
alwaysApply: true,
source: "rules-block",
};
const ruleAlwaysApplyTrue: RuleWithSource = {
name: "Always Apply True",
rule: "Always apply this rule",
globs: "**/*.nonexistent",
alwaysApply: true,
source: "rules-block",
};
const ruleAlwaysApplyFalse: RuleWithSource = {
name: "Always Apply False",
rule: "Never apply this rule",
globs: "**/*.ts?(x)",
alwaysApply: false,
source: "rules-block",
};
const ruleAlwaysApplyFalseNoGlobs: RuleWithSource = {
name: "Always Apply False No Globs",
rule: "Never apply this rule",
alwaysApply: false,
source: "rules-block",
};
describe("alwaysApply behavior", () => {
it("should return true when alwaysApply is true, regardless of file paths", () => {
expect(shouldApplyRule(ruleAlwaysApplyTrue, [])).toBe(true);
expect(shouldApplyRule(ruleAlwaysApplyTrue, ["src/main.js"])).toBe(true);
expect(shouldApplyRule(ruleAlwaysApplyTrue, ["Component.tsx"])).toBe(true);
});
it("should use glob matching when alwaysApply is false", () => {
// Should apply when globs match
expect(shouldApplyRule(ruleAlwaysApplyFalse, ["src/main.ts"])).toBe(true);
expect(shouldApplyRule(ruleAlwaysApplyFalse, ["Component.tsx"])).toBe(true);
// Should not apply when globs don't match
expect(shouldApplyRule(ruleAlwaysApplyFalse, ["script.py"])).toBe(false);
expect(shouldApplyRule(ruleAlwaysApplyFalse, [])).toBe(false);
});
it("should return false when alwaysApply is false and no globs specified", () => {
expect(shouldApplyRule(ruleAlwaysApplyFalseNoGlobs, [])).toBe(false);
expect(shouldApplyRule(ruleAlwaysApplyFalseNoGlobs, ["any-file.js"])).toBe(false);
});
});
describe("default behavior (alwaysApply undefined)", () => {
it("should return true for rules without globs regardless of file paths", () => {
expect(shouldApplyRule(ruleWithoutGlobs, [])).toBe(true);
expect(shouldApplyRule(ruleWithoutGlobs, ["src/main.js"])).toBe(true);
expect(shouldApplyRule(ruleWithoutGlobs, ["Component.tsx", "utils.py"])).toBe(true);
});
it("should return false for rules with globs when no file paths are provided", () => {
expect(shouldApplyRule(ruleWithGlobs, [])).toBe(false);
});
it("should return true for rules with globs when matching file paths are provided", () => {
expect(shouldApplyRule(ruleWithGlobs, ["Component.tsx"])).toBe(true);
expect(shouldApplyRule(ruleWithGlobs, ["src/main.ts"])).toBe(true);
expect(shouldApplyRule(ruleWithGlobs, ["utils.js", "Component.tsx"])).toBe(true);
});
it("should return false for rules with globs when no matching file paths are provided", () => {
expect(shouldApplyRule(ruleWithGlobs, ["utils.py"])).toBe(false);
expect(shouldApplyRule(ruleWithGlobs, ["main.js", "script.rb"])).toBe(false);
});
});
describe("glob pattern matching", () => {
const ruleWithArrayGlobs: RuleWithSource = {
name: "Rule with Array Globs",
rule: "Apply to specific patterns",
globs: ["src/**/*.ts", "tests/**/*.test.js"],
source: "rules-block",
};
const ruleWithSpecificPattern: RuleWithSource = {
name: "Rule with Specific Pattern",
rule: "Apply to Python files",
globs: "**/*.py",
source: "rules-block",
};
it("should handle array of glob patterns", () => {
expect(shouldApplyRule(ruleWithArrayGlobs, ["src/main.ts"])).toBe(true);
expect(shouldApplyRule(ruleWithArrayGlobs, ["tests/unit.test.js"])).toBe(true);
expect(shouldApplyRule(ruleWithArrayGlobs, ["config/settings.json"])).toBe(false);
});
it("should handle string glob patterns", () => {
expect(shouldApplyRule(ruleWithSpecificPattern, ["utils.py"])).toBe(true);
expect(shouldApplyRule(ruleWithSpecificPattern, ["src/models/user.py"])).toBe(true);
expect(shouldApplyRule(ruleWithSpecificPattern, ["utils.js"])).toBe(false);
});
it("should return true if any file path matches when multiple paths provided", () => {
expect(shouldApplyRule(ruleWithSpecificPattern, ["utils.js", "models.py", "config.json"])).toBe(true);
expect(shouldApplyRule(ruleWithGlobs, ["utils.py", "Component.tsx", "script.rb"])).toBe(true);
});
});
describe("edge cases", () => {
it("should handle empty globs array", () => {
const ruleWithEmptyGlobs: RuleWithSource = {
name: "Rule with Empty Globs",
rule: "Test rule",
globs: [],
source: "rules-block",
};
// Empty array should be treated as "no globs" (truthy check fails)
expect(shouldApplyRule(ruleWithEmptyGlobs, ["any-file.js"])).toBe(false);
});
it("should handle undefined globs", () => {
const ruleUndefinedGlobs: RuleWithSource = {
name: "Rule with Undefined Globs",
rule: "Test rule",
globs: undefined,
source: "rules-block",
};
expect(shouldApplyRule(ruleUndefinedGlobs, ["any-file.js"])).toBe(true);
expect(shouldApplyRule(ruleUndefinedGlobs, [])).toBe(true);
});
});
});

View File

@ -28,6 +28,39 @@ const matchesGlobs = (
return false;
};
/**
 * Decides whether a rule applies, combining its `alwaysApply` flag with
 * glob matching against the supplied file paths.
 *
 * - alwaysApply: true  → always applies, globs and paths are ignored
 * - alwaysApply: false → applies only when globs exist AND a path matches
 * - alwaysApply unset  → no globs means apply everywhere; otherwise a
 *   path must match the globs
 *
 * @param rule - The rule to check
 * @param filePaths - File paths tested against the rule's globs
 * @returns true if the rule should be applied, false otherwise
 */
export const shouldApplyRule = (
  rule: RuleWithSource,
  filePaths: string[],
): boolean => {
  // An explicit alwaysApply: true short-circuits everything else.
  if (rule.alwaysApply) {
    return true;
  }

  const anyPathMatches = () =>
    filePaths.some((path) => matchesGlobs(path, rule.globs));

  // Explicit opt-out of "always": the rule is purely glob-driven,
  // and a rule without globs can never fire.
  if (rule.alwaysApply === false) {
    return rule.globs ? anyPathMatches() : false;
  }

  // Default (alwaysApply undefined): glob-less rules are global;
  // rules with globs require at least one matching path.
  return rule.globs ? anyPathMatches() : true;
};
/**
* Filters rules that apply to the given message and/or context items
*
@ -52,16 +85,7 @@ export const getApplicableRules = (
// Combine file paths from both sources
const allFilePaths = [...filePathsFromMessage, ...filePathsFromContextItems];
return rules.filter((rule) => {
// A rule is active if it has no globs (applies to all files)
// or if at least one file path matches its globs
const hasNoGlobs = !rule.globs;
const matchesAnyFilePath = allFilePaths.some((path) =>
matchesGlobs(path, rule.globs),
);
return hasNoGlobs || matchesAnyFilePath;
});
return rules.filter((rule) => shouldApplyRule(rule, allFilePaths));
};
/**

View File

@ -6,39 +6,57 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
const supportsFn = PROVIDER_TOOL_SUPPORT["continue-proxy"];
it("should return true for Claude 3.5 models", () => {
expect(supportsFn("claude-3-5-sonnet")).toBe(true);
expect(supportsFn("claude-3.5-sonnet")).toBe(true);
expect(
supportsFn("ownerSlug/packageSlug/anthropic/claude-3-5-sonnet"),
).toBe(true);
expect(
supportsFn("ownerSlug/packageSlug/anthropic/claude-3.5-sonnet"),
).toBe(true);
});
it("should return true for Claude 3.7 models", () => {
expect(supportsFn("claude-3-7-haiku")).toBe(true);
expect(supportsFn("claude-3.7-sonnet")).toBe(true);
expect(
supportsFn("ownerSlug/packageSlug/anthropic/claude-3-7-haiku"),
).toBe(true);
expect(
supportsFn("ownerSlug/packageSlug/anthropic/claude-3.7-sonnet"),
).toBe(true);
});
it("should return true for GPT-4 models", () => {
expect(supportsFn("gpt-4-turbo")).toBe(true);
expect(supportsFn("gpt-4-1106-preview")).toBe(true);
expect(supportsFn("ownerSlug/packageSlug/openai/gpt-4-turbo")).toBe(true);
expect(
supportsFn("ownerSlug/packageSlug/openai/gpt-4-1106-preview"),
).toBe(true);
});
it("should return true for O3 models", () => {
expect(supportsFn("o3-preview")).toBe(true);
expect(supportsFn("ownerSlug/packageSlug/openai/o3-preview")).toBe(true);
});
it("should return true for Gemini models", () => {
expect(supportsFn("gemini-pro")).toBe(true);
expect(supportsFn("gemini-1.5-pro")).toBe(true);
expect(supportsFn("ownerSlug/packageSlug/gemini/gemini-pro")).toBe(true);
expect(supportsFn("ownerSlug/packageSlug/gemini/gemini-1.5-pro")).toBe(
true,
);
});
it("should return false for unsupported models", () => {
expect(supportsFn("gpt-3.5-turbo")).toBe(false);
expect(supportsFn("claude-2")).toBe(false);
expect(supportsFn("llama-3")).toBe(false);
expect(supportsFn("ownerSlug/packageSlug/openai/gpt-3.5-turbo")).toBe(
false,
);
expect(supportsFn("ownerSlug/packageSlug/anthropic/claude-2")).toBe(
false,
);
expect(supportsFn("ownerSlug/packageSlug/together/llama-3")).toBe(false);
});
it("should handle case insensitivity", () => {
expect(supportsFn("CLAUDE-3-5-sonnet")).toBe(true);
expect(supportsFn("GPT-4-turbo")).toBe(true);
expect(supportsFn("GEMINI-pro")).toBe(true);
expect(
supportsFn("ownerSlug/packageSlug/anthropic/CLAUDE-3-5-sonnet"),
).toBe(true);
expect(supportsFn("ownerSlug/packageSlug/openai/GPT-4-turbo")).toBe(true);
expect(supportsFn("ownerSlug/packageSlug/gemini/GEMINI-pro")).toBe(true);
});
});
@ -56,8 +74,8 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
});
it("should return undefined for unsupported models", () => {
expect(supportsFn("claude-2")).toBeUndefined();
expect(supportsFn("claude-instant")).toBeUndefined();
expect(supportsFn("claude-2")).toBe(false);
expect(supportsFn("claude-instant")).toBe(false);
});
it("should handle case insensitivity", () => {
@ -81,8 +99,8 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
});
it("should return undefined for unsupported models", () => {
expect(supportsFn("gpt-3.5-turbo")).toBeUndefined();
expect(supportsFn("davinci")).toBeUndefined();
expect(supportsFn("gpt-3.5-turbo")).toBe(false);
expect(supportsFn("davinci")).toBe(false);
});
it("should handle case insensitivity", () => {
@ -133,26 +151,22 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
});
it("should return undefined for Claude Haiku and Opus models", () => {
expect(
supportsFn("anthropic.claude-3-5-haiku-20240307-v1:0"),
).toBeUndefined();
expect(
supportsFn("anthropic.claude-3.5-haiku-20240620-v1:0"),
).toBeUndefined();
expect(
supportsFn("anthropic.claude-3-7-haiku-20240620-v1:0"),
).toBeUndefined();
expect(
supportsFn("anthropic.claude-3-5-opus-20240620-v1:0"),
).toBeUndefined();
expect(
supportsFn("anthropic.claude-3.7-opus-20240620-v1:0"),
).toBeUndefined();
expect(supportsFn("anthropic.claude-3-5-haiku-20240307-v1:0")).toBe(
false,
);
expect(supportsFn("anthropic.claude-3.5-haiku-20240620-v1:0")).toBe(
false,
);
expect(supportsFn("anthropic.claude-3-7-haiku-20240620-v1:0")).toBe(
false,
);
expect(supportsFn("anthropic.claude-3-5-opus-20240620-v1:0")).toBe(false);
expect(supportsFn("anthropic.claude-3.7-opus-20240620-v1:0")).toBe(false);
});
it("should return undefined for other unsupported models", () => {
expect(supportsFn("anthropic.claude-instant-v1")).toBeUndefined();
expect(supportsFn("anthropic.titan-text-express-v1")).toBeUndefined();
expect(supportsFn("anthropic.claude-instant-v1")).toBe(false);
expect(supportsFn("anthropic.titan-text-express-v1")).toBe(false);
});
it("should handle case insensitivity", () => {
@ -228,9 +242,9 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
});
it("should return undefined for other models", () => {
expect(supportsFn("llama2")).toBeUndefined();
expect(supportsFn("phi-2")).toBeUndefined();
expect(supportsFn("falcon")).toBeUndefined();
expect(supportsFn("llama2")).toBe(false);
expect(supportsFn("phi-2")).toBe(false);
expect(supportsFn("falcon")).toBe(false);
});
it("should handle case insensitivity", () => {
@ -243,16 +257,16 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
describe("edge cases", () => {
it("should handle empty model names", () => {
expect(PROVIDER_TOOL_SUPPORT["continue-proxy"]("")).toBe(false);
expect(PROVIDER_TOOL_SUPPORT["anthropic"]("")).toBeUndefined();
expect(PROVIDER_TOOL_SUPPORT["openai"]("")).toBeUndefined();
expect(PROVIDER_TOOL_SUPPORT["anthropic"]("")).toBe(false);
expect(PROVIDER_TOOL_SUPPORT["openai"]("")).toBe(false);
expect(PROVIDER_TOOL_SUPPORT["gemini"]("")).toBe(false);
expect(PROVIDER_TOOL_SUPPORT["bedrock"]("")).toBeUndefined();
expect(PROVIDER_TOOL_SUPPORT["ollama"]("")).toBeUndefined();
expect(PROVIDER_TOOL_SUPPORT["bedrock"]("")).toBe(false);
expect(PROVIDER_TOOL_SUPPORT["ollama"]("")).toBe(false);
});
it("should handle non-existent provider", () => {
// @ts-ignore - Testing runtime behavior with invalid provider
expect(PROVIDER_TOOL_SUPPORT["non-existent"]).toBeUndefined();
expect(PROVIDER_TOOL_SUPPORT["non-existent"]).toBe(undefined);
});
});
});

View File

@ -1,246 +1,274 @@
export const PROVIDER_TOOL_SUPPORT: Record<
string,
(model: string) => boolean | undefined
> = {
"continue-proxy": (model) => {
// see getContinueProxyModelName
const provider = model.split("/")[2];
const _model = model.split("/")[3];
if (provider && _model && provider !== "continue-proxy") {
const fn = PROVIDER_TOOL_SUPPORT[provider];
if (fn) {
return fn(_model);
}
}
return [
"claude-3-5",
"claude-3.5",
"claude-3-7",
"claude-3.7",
"claude-sonnet-4",
"gpt-4",
"o3",
"gemini",
].some((part) => model.toLowerCase().startsWith(part));
},
anthropic: (model) => {
if (
[
import { parseProxyModelName } from "@continuedev/config-yaml";
export const PROVIDER_TOOL_SUPPORT: Record<string, (model: string) => boolean> =
{
"continue-proxy": (model) => {
try {
const { provider, model: _model } = parseProxyModelName(model);
if (provider && _model && provider !== "continue-proxy") {
const fn = PROVIDER_TOOL_SUPPORT[provider];
if (fn) {
return fn(_model);
}
}
} catch (e) {}
return [
"claude-3-5",
"claude-3.5",
"claude-3-7",
"claude-3.7",
"claude-sonnet-4",
].some((part) => model.toLowerCase().startsWith(part))
) {
return true;
}
},
azure: (model) => {
if (
model.toLowerCase().startsWith("gpt-4") ||
model.toLowerCase().startsWith("o3")
)
return true;
return false;
},
openai: (model) => {
// https://platform.openai.com/docs/guides/function-calling#models-supporting-function-calling
if (
model.toLowerCase().startsWith("gpt-4") ||
model.toLowerCase().startsWith("o3")
) {
return true;
}
// firworks-ai https://docs.fireworks.ai/guides/function-calling
if (model.startsWith("accounts/fireworks/models/")) {
switch (model.substring(26)) {
case "llama-v3p1-405b-instruct":
case "llama-v3p1-70b-instruct":
case "qwen2p5-72b-instruct":
case "firefunction-v1":
case "firefunction-v2":
"gpt-4",
"o3",
"gemini",
].some((part) => model.toLowerCase().startsWith(part));
},
anthropic: (model) => {
if (
[
"claude-3-5",
"claude-3.5",
"claude-3-7",
"claude-3.7",
"claude-sonnet-4",
].some((part) => model.toLowerCase().startsWith(part))
) {
return true;
}
return false;
},
azure: (model) => {
if (
model.toLowerCase().startsWith("gpt-4") ||
model.toLowerCase().startsWith("o3")
)
return true;
return false;
},
  openai: (model) => {
    // https://platform.openai.com/docs/guides/function-calling#models-supporting-function-calling
    if (
      model.toLowerCase().startsWith("gpt-4") ||
      model.toLowerCase().startsWith("o3")
    ) {
      return true;
    }
    // fireworks-ai https://docs.fireworks.ai/guides/function-calling
    // Note: this prefix check is case-sensitive, unlike the checks above.
    if (model.startsWith("accounts/fireworks/models/")) {
      // substring(26) strips the "accounts/fireworks/models/" prefix (26 chars),
      // leaving just the bare model name for the allowlist below.
      switch (model.substring(26)) {
        case "llama-v3p1-405b-instruct":
        case "llama-v3p1-70b-instruct":
        case "qwen2p5-72b-instruct":
        case "firefunction-v1":
        case "firefunction-v2":
          return true;
        default:
          return false;
      }
    }
    return false;
  },
gemini: (model) => {
// All gemini models support function calling
return model.toLowerCase().includes("gemini");
},
vertexai: (model) => {
// All gemini models except flash 2.0 lite support function calling
return (
model.toLowerCase().includes("gemini") &&
!model.toLowerCase().includes("lite")
);
},
  bedrock: (model) => {
    // For Bedrock, only Claude Sonnet models are supported: versions 3.5/3-5,
    // 3.7/3-7, and claude-sonnet-4 (the model id must also contain "sonnet").
    if (
      model.toLowerCase().includes("sonnet") &&
      [
        "claude-3-5",
        "claude-3.5",
        "claude-3-7",
        "claude-3.7",
        "claude-sonnet-4",
      ].some((part) => model.toLowerCase().includes(part))
    ) {
      return true;
    }
    return false;
  },
mistral: (model) => {
// https://docs.mistral.ai/capabilities/function_calling/
return (
!model.toLowerCase().includes("mamba") &&
[
"devstral",
"codestral",
"mistral-large",
"mistral-small",
"pixtral",
"ministral",
"mistral-nemo",
"devstral",
].some((part) => model.toLowerCase().includes(part))
);
},
// https://ollama.com/search?c=tools
ollama: (model) => {
let modelName = "";
// Extract the model name after the last slash to support other registries
if (model.includes("/")) {
let parts = model.split("/");
modelName = parts[parts.length - 1];
} else {
modelName = model;
}
if (
["vision", "math", "guard", "mistrallite", "mistral-openorca"].some(
(part) => modelName.toLowerCase().includes(part),
)
) {
return false;
}
if (
[
"cogito",
"llama3.3",
"qwq",
"llama3.2",
"llama3.1",
"qwen2",
"qwen3",
"mixtral",
"command-r",
"smollm2",
"hermes3",
"athene-v2",
"nemotron",
"llama3-groq",
"granite3",
"granite-3",
"aya-expanse",
"firefunction-v2",
"mistral",
"devstral",
].some((part) => modelName.toLowerCase().includes(part))
) {
return true;
}
return false;
},
sambanova: (model) => {
// https://docs.sambanova.ai/cloud/docs/capabilities/function-calling
if (
model.toLowerCase().startsWith("meta-llama-3") ||
model.toLowerCase().includes("llama-4") ||
model.toLowerCase().includes("deepseek")
) {
return true;
}
return false;
},
deepseek: (model) => {
if (model !== "deepseek-reasoner") {
return true;
}
return false;
},
watsonx: (model) => {
if (model.toLowerCase().includes("guard")) {
return false;
}
if (
[
"llama-3",
"llama-4",
"mistral",
"codestral",
"granite-3",
"devstral",
].some((part) => model.toLowerCase().includes(part))
) {
return true;
}
return false;
},
openrouter: (model) => {
// https://openrouter.ai/models?fmt=cards&supported_parameters=tools
if (
["vision", "math", "guard", "mistrallite", "mistral-openorca"].some(
(part) => model.toLowerCase().includes(part),
)
) {
return false;
}
const supportedPrefixes = [
"openai/gpt-3.5",
"openai/gpt-4",
"openai/o1",
"openai/o3",
"openai/o4",
"anthropic/claude-3",
"anthropic/claude-4",
"microsoft/phi-3",
"google/gemini-flash-1.5",
"google/gemini-2",
"google/gemini-pro",
"x-ai/grok",
"qwen/qwen3",
"qwen/qwen-",
"cohere/command-r",
"ai21/jamba-1.6",
"mistralai/mistral",
"mistralai/ministral",
"mistralai/codestral",
"mistralai/mixtral",
"mistral/ministral",
"mistral/devstral",
"mistralai/pixtral",
"meta-llama/llama-3.3",
"amazon/nova",
"deepseek/deepseek-r1",
"deepseek/deepseek-chat",
"meta-llama/llama-4",
"all-hands/openhands-lm-32b",
];
for (const prefix of supportedPrefixes) {
if (model.toLowerCase().startsWith(prefix)) {
return true;
default:
return false;
}
}
const specificModels = [
"qwen/qwq-32b",
"qwen/qwen-2.5-72b-instruct",
"meta-llama/llama-3.2-3b-instruct",
"meta-llama/llama-3-8b-instruct",
"meta-llama/llama-3-70b-instruct",
"arcee-ai/caller-large",
"nousresearch/hermes-3-llama-3.1-70b",
];
for (const model of specificModels) {
if (model.toLowerCase() === model) {
return true;
}
}
const supportedContains = ["llama-3.1"];
for (const model of supportedContains) {
if (model.toLowerCase().includes(model)) {
return true;
}
}
}
},
gemini: (model) => {
// All gemini models support function calling
return model.toLowerCase().includes("gemini");
},
vertexai: (model) => {
// All gemini models except flash 2.0 lite support function calling
return (
model.toLowerCase().includes("gemini") &&
!model.toLowerCase().includes("lite")
);
},
bedrock: (model) => {
// For Bedrock, only support Claude Sonnet models with versions 3.5/3-5 and 3.7/3-7
if (
model.toLowerCase().includes("sonnet") &&
[
"claude-3-5",
"claude-3.5",
"claude-3-7",
"claude-3.7",
"claude-sonnet-4",
].some((part) => model.toLowerCase().includes(part))
) {
return true;
}
},
mistral: (model) => {
// https://docs.mistral.ai/capabilities/function_calling/
return (
!model.toLowerCase().includes("mamba") &&
[
"devstral",
"codestral",
"mistral-large",
"mistral-small",
"pixtral",
"ministral",
"mistral-nemo",
].some((part) => model.toLowerCase().includes(part))
);
},
// https://ollama.com/search?c=tools
ollama: (model) => {
let modelName = "";
// Extract the model name after the last slash to support other registries
if (model.includes("/")) {
let parts = model.split("/");
modelName = parts[parts.length - 1];
} else {
modelName = model;
}
if (
["vision", "math", "guard", "mistrallite", "mistral-openorca"].some(
(part) => modelName.toLowerCase().includes(part),
)
) {
return false;
}
if (
[
"cogito",
"llama3.3",
"qwq",
"llama3.2",
"llama3.1",
"qwen2",
"qwen3",
"mixtral",
"command-r",
"smollm2",
"hermes3",
"athene-v2",
"nemotron",
"llama3-groq",
"granite3",
"granite-3",
"aya-expanse",
"firefunction-v2",
"mistral",
"devstral",
].some((part) => modelName.toLowerCase().includes(part))
) {
return true;
}
},
sambanova: (model) => {
// https://docs.sambanova.ai/cloud/docs/capabilities/function-calling
if (
model.toLowerCase().startsWith("meta-llama-3") ||
model.toLowerCase().includes("llama-4") ||
model.toLowerCase().includes("deepseek")
) {
return true;
}
},
deepseek: (model) => {
if (model !== "deepseek-reasoner") {
return true;
}
},
watsonx: (model) => {
if (model.toLowerCase().includes("guard")) return false;
if (
["llama-3", "llama-4", "mistral", "codestral", "granite-3"].some((part) =>
model.toLowerCase().includes(part),
)
)
return true;
},
openrouter: (model) => {
// https://openrouter.ai/models?fmt=cards&supported_parameters=tools
if (
["vision", "math", "guard", "mistrallite", "mistral-openorca"].some(
(part) => model.toLowerCase().includes(part),
)
) {
return false;
}
const supportedPrefixes = [
"openai/gpt-3.5",
"openai/gpt-4",
"openai/o1",
"openai/o3",
"openai/o4",
"anthropic/claude-3",
"microsoft/phi-3",
"google/gemini-flash-1.5",
"google/gemini-2",
"google/gemini-pro",
"x-ai/grok",
"qwen/qwen3",
"qwen/qwen-",
"cohere/command-r",
"ai21/jamba-1.6",
"mistralai/mistral",
"mistralai/ministral",
"mistralai/codestral",
"mistralai/mixtral",
"mistral/ministral",
"mistralai/pixtral",
"meta-llama/llama-3.3",
"amazon/nova",
"deepseek/deepseek-r1",
"deepseek/deepseek-chat",
"meta-llama/llama-4",
"all-hands/openhands-lm-32b",
];
for (const prefix of supportedPrefixes) {
if (model.toLowerCase().startsWith(prefix)) {
return true;
}
}
const specificModels = [
"qwen/qwq-32b",
"qwen/qwen-2.5-72b-instruct",
"meta-llama/llama-3.2-3b-instruct",
"meta-llama/llama-3-8b-instruct",
"meta-llama/llama-3-70b-instruct",
"arcee-ai/caller-large",
"nousresearch/hermes-3-llama-3.1-70b",
];
for (const model of specificModels) {
if (model.toLowerCase() === model) {
return true;
}
}
const supportedContains = ["llama-3.1"];
for (const model of supportedContains) {
if (model.toLowerCase().includes(model)) {
return true;
}
}
},
};
},
};

View File

@ -0,0 +1,282 @@
import { ConfigHandler } from "../config/ConfigHandler.js";
import { TRIAL_FIM_MODEL } from "../config/onboarding.js";
import { IDE, ILLM } from "../index.js";
import OpenAI from "../llm/llms/OpenAI.js";
import { DEFAULT_AUTOCOMPLETE_OPTS } from "../util/parameters.js";
import { shouldCompleteMultiline } from "../autocomplete/classification/shouldCompleteMultiline.js";
import { ContextRetrievalService } from "../autocomplete/context/ContextRetrievalService.js";
import { BracketMatchingService } from "../autocomplete/filtering/BracketMatchingService.js";
import { CompletionStreamer } from "../autocomplete/generation/CompletionStreamer.js";
import { postprocessCompletion } from "../autocomplete/postprocessing/index.js";
import { shouldPrefilter } from "../autocomplete/prefiltering/index.js";
import { getAllSnippets } from "../autocomplete/snippets/index.js";
import { renderPrompt } from "../autocomplete/templating/index.js";
import { GetLspDefinitionsFunction } from "../autocomplete/types.js";
import { AutocompleteDebouncer } from "../autocomplete/util/AutocompleteDebouncer.js";
import { AutocompleteLoggingService } from "../autocomplete/util/AutocompleteLoggingService.js";
import AutocompleteLruCache from "../autocomplete/util/AutocompleteLruCache.js";
import { HelperVars } from "../autocomplete/util/HelperVars.js";
import { AutocompleteInput, AutocompleteOutcome } from "../autocomplete/util/types.js";
const autocompleteCache = AutocompleteLruCache.get();
// Errors that can be expected on occasion even during normal functioning should not be shown.
// Not worth disrupting the user to tell them that a single autocomplete request didn't go through
const ERRORS_TO_IGNORE = [
// From Ollama
"unexpected server status",
"operation was aborted",
];
/**
 * Orchestrates inline completion requests end-to-end: debouncing, LLM
 * preparation, prefiltering, snippet gathering, prompt rendering, cache
 * lookup or streamed generation, postprocessing, and outcome logging.
 *
 * NOTE(review): despite the class name, the implementation and its log
 * messages still say "autocomplete" throughout — it appears to be derived
 * from the autocomplete CompletionProvider; confirm intended scope.
 */
export class NextEditProvider {
  // Same LRU-cache singleton as the module-level `autocompleteCache` promise.
  private autocompleteCache = AutocompleteLruCache.get();
  // Error messages already surfaced to the user (each is shown at most once).
  public errorsShown: Set<string> = new Set();
  private bracketMatchingService = new BracketMatchingService();
  private debouncer = new AutocompleteDebouncer();
  private completionStreamer: CompletionStreamer;
  private loggingService = new AutocompleteLoggingService();
  private contextRetrievalService: ContextRetrievalService;
  constructor(
    private readonly configHandler: ConfigHandler,
    private readonly ide: IDE,
    private readonly _injectedGetLlm: () => Promise<ILLM | undefined>,
    private readonly _onError: (e: any) => void,
    private readonly getDefinitionsFromLsp: GetLspDefinitionsFunction,
  ) {
    this.completionStreamer = new CompletionStreamer(this.onError.bind(this));
    this.contextRetrievalService = new ContextRetrievalService(this.ide);
  }
  // Resolves the LLM to use for completion, applying provider-specific
  // workarounds. Returns undefined when completion should be skipped.
  private async _prepareLlm(): Promise<ILLM | undefined> {
    const llm = await this._injectedGetLlm();
    if (!llm) {
      return undefined;
    }
    // Temporary fix for JetBrains autocomplete bug as described in https://github.com/continuedev/continue/pull/3022
    if (llm.model === undefined && llm.completionOptions?.model !== undefined) {
      llm.model = llm.completionOptions.model;
    }
    // Ignore empty API keys for Mistral since we currently write
    // a template provider without one during onboarding
    if (llm.providerName === "mistral" && llm.apiKey === "") {
      return undefined;
    }
    // Set temperature (but don't override)
    if (llm.completionOptions.temperature === undefined) {
      llm.completionOptions.temperature = 0.01;
    }
    if (llm instanceof OpenAI) {
      llm.useLegacyCompletionsEndpoint = true;
    } else if (
      llm.providerName === "free-trial" &&
      llm.model !== TRIAL_FIM_MODEL
    ) {
      llm.model = TRIAL_FIM_MODEL;
    }
    return llm;
  }
  // Forwards an error to the injected handler, at most once per distinct
  // message, ignoring expected transient failures (see ERRORS_TO_IGNORE).
  private onError(e: any) {
    if (
      ERRORS_TO_IGNORE.some((err) =>
        typeof e === "string" ? e.includes(err) : e?.message?.includes(err),
      )
    ) {
      return;
    }
    console.warn("Error generating autocompletion: ", e);
    if (!this.errorsShown.has(e.message)) {
      this.errorsShown.add(e.message);
      this._onError(e);
    }
  }
  // Aborts any in-flight completion request via the logging service.
  public cancel() {
    this.loggingService.cancel();
  }
  // Records acceptance of a completion and feeds the accepted text into
  // bracket matching so future completions stay balanced.
  public accept(completionId: string) {
    const outcome = this.loggingService.accept(completionId);
    if (!outcome) {
      return;
    }
    this.bracketMatchingService.handleAcceptedCompletion(
      outcome.completion,
      outcome.filepath,
    );
  }
  // Marks a completion as having been shown to the user (for logging).
  public markDisplayed(completionId: string, outcome: AutocompleteOutcome) {
    this.loggingService.markDisplayed(completionId, outcome);
  }
  // Merges user-configured tab-autocomplete options over the defaults.
  private async _getAutocompleteOptions() {
    const { config } = await this.configHandler.loadConfig();
    const options = {
      ...DEFAULT_AUTOCOMPLETE_OPTS,
      ...config?.tabAutocompleteOptions,
    };
    return options;
  }
  /**
   * Produces a completion outcome for the given input, or undefined when the
   * request is debounced, prefiltered, aborted, or yields no completion text.
   * Errors are routed through onError rather than thrown to the caller.
   */
  public async provideInlineCompletionItems(
    input: AutocompleteInput,
    token: AbortSignal | undefined,
  ): Promise<AutocompleteOutcome | undefined> {
    try {
      // Create abort signal if not given
      if (!token) {
        const controller = this.loggingService.createAbortController(
          input.completionId,
        );
        token = controller.signal;
      }
      const startTime = Date.now();
      const options = await this._getAutocompleteOptions();
      // Debounce
      if (await this.debouncer.delayAndShouldDebounce(options.debounceDelay)) {
        return undefined;
      }
      const llm = await this._prepareLlm();
      if (!llm) {
        return undefined;
      }
      // A model-specific prompt template takes precedence over the default.
      if (llm.promptTemplates?.autocomplete) {
        options.template = llm.promptTemplates.autocomplete as string;
      }
      const helper = await HelperVars.create(
        input,
        options,
        llm.model,
        this.ide,
      );
      if (await shouldPrefilter(helper, this.ide)) {
        return undefined;
      }
      const [snippetPayload, workspaceDirs] = await Promise.all([
        getAllSnippets({
          helper,
          ide: this.ide,
          getDefinitionsFromLsp: this.getDefinitionsFromLsp,
          contextRetrievalService: this.contextRetrievalService,
        }),
        this.ide.getWorkspaceDirs(),
      ]);
      const { prompt, prefix, suffix, completionOptions } = renderPrompt({
        snippetPayload,
        workspaceDirs,
        helper,
      });
      // Completion
      let completion: string | undefined = "";
      const cache = await autocompleteCache;
      // Cache lookup is keyed on the pruned prefix only.
      const cachedCompletion = helper.options.useCache
        ? await cache.get(helper.prunedPrefix)
        : undefined;
      let cacheHit = false;
      if (cachedCompletion) {
        // Cache
        cacheHit = true;
        completion = cachedCompletion;
      } else {
        const multiline =
          !helper.options.transform || shouldCompleteMultiline(helper);
        const completionStream =
          this.completionStreamer.streamCompletionWithFilters(
            token,
            llm,
            prefix,
            suffix,
            prompt,
            multiline,
            completionOptions,
            helper,
          );
        for await (const update of completionStream) {
          completion += update;
        }
        // Don't postprocess if aborted
        if (token.aborted) {
          return undefined;
        }
        const processedCompletion = helper.options.transform
          ? postprocessCompletion({
              completion,
              prefix: helper.prunedPrefix,
              suffix: helper.prunedSuffix,
              llm,
            })
          : completion;
        completion = processedCompletion;
      }
      if (!completion) {
        return undefined;
      }
      const outcome: AutocompleteOutcome = {
        time: Date.now() - startTime,
        completion,
        prefix,
        suffix,
        prompt,
        modelProvider: llm.underlyingProviderName,
        modelName: llm.model,
        completionOptions,
        cacheHit,
        filepath: helper.filepath,
        numLines: completion.split("\n").length,
        completionId: helper.input.completionId,
        gitRepo: await this.ide.getRepoName(helper.filepath),
        uniqueId: await this.ide.getUniqueId(),
        timestamp: Date.now(),
        ...helper.options,
      };
      //////////
      // Save to cache
      if (!outcome.cacheHit && helper.options.useCache) {
        (await this.autocompleteCache)
          .put(outcome.prefix, outcome.completion)
          .catch((e) => console.warn(`Failed to save to cache: ${e.message}`));
      }
      // When using the JetBrains extension, Mark as displayed
      const ideType = (await this.ide.getIdeInfo()).ideType;
      if (ideType === "jetbrains") {
        this.markDisplayed(input.completionId, outcome);
      }
      return outcome;
    } catch (e: any) {
      this.onError(e);
    } finally {
      this.loggingService.deleteAbortController(input.completionId);
    }
  }
}

View File

@ -0,0 +1 @@
// Feature flag for the "next edit" feature; currently disabled.
// NOTE(review): consumers are not visible in this file — confirm where this gates behavior.
export const IS_NEXT_EDIT_ACTIVE = false;

2383
core/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -4,6 +4,7 @@
"description": "The Continue Core contains functionality that can be shared across web, VS Code, or Node.js",
"scripts": {
"test": "cross-env NODE_OPTIONS=--experimental-vm-modules jest",
"vitest": "vitest run LocalPlatformClient",
"test:coverage": "cross-env NODE_OPTIONS=--experimental-vm-modules jest --coverage && open ./coverage/lcov-report/index.html",
"tsc:check": "tsc -p ./ --noEmit",
"build:npm": "tsc -p ./tsconfig.npm.json",
@ -41,7 +42,8 @@
"onnxruntime-common": "1.14.0",
"onnxruntime-web": "1.14.0",
"ts-jest": "^29.1.1",
"typescript": "^5.6.3"
"typescript": "^5.6.3",
"vitest": "^3.1.4"
},
"dependencies": {
"@aws-sdk/client-bedrock-runtime": "^3.779.0",
@ -52,7 +54,7 @@
"@continuedev/fetch": "^1.0.10",
"@continuedev/llm-info": "^1.0.8",
"@continuedev/openai-adapters": "^1.0.25",
"@modelcontextprotocol/sdk": "^1.5.0",
"@modelcontextprotocol/sdk": "^1.12.0",
"@mozilla/readability": "^0.5.0",
"@octokit/rest": "^20.1.1",
"@typescript-eslint/eslint-plugin": "^7.8.0",

View File

@ -1,4 +1,5 @@
import { ContinueSDK, SlashCommand } from "../..";
import { getDiffsFromCache } from "../../autocomplete/snippets/gitDiffCache";
import { renderChatMessage } from "../../util/messageContent";
import { getLastNPathParts } from "../../util/uri";
import { parsePromptFileV1V2 } from "../v2/parsePromptFileV1V2";
@ -32,8 +33,8 @@ async function renderPromptV1(
// A few context providers that don't need to be in config.json to work in .prompt files
if (helpers?.find((helper) => helper[0] === "diff")) {
const diff = await context.ide.getDiff(true);
inputData.diff = diff.join("\n");
const diffs = await getDiffsFromCache(context.ide);
inputData.diff = diffs.join("\n");
}
if (helpers?.find((helper) => helper[0] === "currentFile")) {
const currentFile = await context.ide.getCurrentFile();

View File

@ -92,7 +92,6 @@ export type ToIdeFromWebviewOrCoreProtocol = {
gotoDefinition: [{ location: Location }, RangeInFile[]];
getGitHubAuthToken: [GetGhTokenArgs, string | undefined];
getControlPlaneSessionInfo: [
{ silent: boolean; useOnboarding: boolean },
ControlPlaneSessionInfo | undefined,

View File

@ -44,7 +44,6 @@ export type ToIdeFromWebviewProtocol = ToIdeFromWebviewOrCoreProtocol & {
];
"jetbrains/getColors": [undefined, Record<string, string | null | undefined>];
"vscode/openMoveRightMarkdown": [undefined, void];
setGitHubAuthToken: [{ token: string }, void];
acceptDiff: [AcceptOrRejectDiffPayload, void];
rejectDiff: [AcceptOrRejectDiffPayload, void];
"edit/sendPrompt": [

View File

@ -1,5 +1,5 @@
import { FromIdeProtocol } from "..";
import { GetGhTokenArgs, ToIdeFromWebviewOrCoreProtocol } from "../ide";
import { ToIdeFromWebviewOrCoreProtocol } from "../ide";
import type {
ContinueRcJson,
@ -50,9 +50,6 @@ export class MessageIde implements IDE {
getIdeSettings(): Promise<IdeSettings> {
return this.request("getIdeSettings", undefined);
}
getGitHubAuthToken(args: GetGhTokenArgs): Promise<string | undefined> {
return this.request("getGitHubAuthToken", args);
}
getFileStats(files: string[]): Promise<FileStatsMap> {
return this.request("getFileStats", { files });
}

View File

@ -34,10 +34,6 @@ export class ReverseMessageIde {
}
private initializeListeners() {
this.on("getGitHubAuthToken", (data) => {
return this.ide.getGitHubAuthToken(data);
});
this.on("getFileStats", (data) => {
return this.ide.getFileStats(data.files);
});

View File

@ -10,6 +10,7 @@ export enum BuiltInToolNames {
ViewDiff = "builtin_view_diff",
LSTool = "builtin_ls",
CreateRuleBlock = "builtin_create_rule_block",
RequestRule = "builtin_request_rule",
// excluded from allTools for now
ViewRepoMap = "builtin_view_repo_map",
@ -18,4 +19,4 @@ export enum BuiltInToolNames {
export const BUILT_IN_GROUP_NAME = "Built-In";
export const CLIENT_TOOLS = [BuiltInToolNames.EditExistingFile];
export const CLIENT_TOOLS_IMPLS = [BuiltInToolNames.EditExistingFile];

View File

@ -10,6 +10,7 @@ import { grepSearchImpl } from "./implementations/grepSearch";
import { lsToolImpl } from "./implementations/lsTool";
import { readCurrentlyOpenFileImpl } from "./implementations/readCurrentlyOpenFile";
import { readFileImpl } from "./implementations/readFile";
import { requestRuleImpl } from "./implementations/requestRule";
import { runTerminalCommandImpl } from "./implementations/runTerminalCommand";
import { searchWebImpl } from "./implementations/searchWeb";
import { viewDiffImpl } from "./implementations/viewDiff";
@ -157,6 +158,8 @@ async function callBuiltInTool(
return await readCurrentlyOpenFileImpl(args, extras);
case BuiltInToolNames.CreateRuleBlock:
return await createRuleBlockImpl(args, extras);
case BuiltInToolNames.RequestRule:
return await requestRuleImpl(args, extras);
default:
throw new Error(`Tool "${functionName}" not found`);
}

View File

@ -16,7 +16,7 @@ export const createRuleBlock: Tool = {
"Creates a persistent rule for all future conversations. For establishing code standards or preferences that should be applied consistently. To modify existing rules, use the edit tool instead.",
parameters: {
type: "object",
required: ["name", "rule"],
required: ["name", "rule", "alwaysApply", "description"],
properties: {
name: {
type: "string",
@ -37,6 +37,11 @@ export const createRuleBlock: Tool = {
description:
"Optional file patterns to which this rule applies (e.g. ['**/*.{ts,tsx}'] or ['src/**/*.ts', 'tests/**/*.ts'])",
},
alwaysApply: {
type: "boolean",
description:
"Whether this rule should always be applied regardless of file pattern matching",
},
},
},
},

View File

@ -6,7 +6,7 @@ export const globSearchTool: Tool = {
displayTitle: "Glob File Search",
wouldLikeTo: 'find file matches for "{{{ pattern }}}"',
isCurrently: 'finding file matches for "{{{ pattern }}}"',
hasAlready: 'retreived file matches for "{{{ pattern }}}"',
hasAlready: 'retrieved file matches for "{{{ pattern }}}"',
readonly: true,
isInstant: true,
group: BUILT_IN_GROUP_NAME,

View File

@ -0,0 +1,237 @@
import { RuleWithSource } from "../..";
import { getRequestRuleDescription } from "./requestRule";
describe("getRequestRuleDescription", () => {
it("should return no rules message when no agent-requested rules exist", () => {
const rules: RuleWithSource[] = [
{
name: "Always Apply Rule",
description: "This rule always applies",
source: "default-chat",
rule: "Always use semicolons",
alwaysApply: true,
ruleFile: "/path/to/rule1.md",
},
{
name: "Rule with Globs",
description: "This rule has globs",
source: "rules-block",
rule: "Use TypeScript",
globs: "**/*.ts",
ruleFile: "/path/to/rule2.md",
},
];
const result = getRequestRuleDescription(rules);
expect(result).toBe(
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\nNo rules available.",
);
});
it("should return formatted rules when agent-requested rules exist", () => {
const rules: RuleWithSource[] = [
{
name: "Agent Rule 1",
description: "First agent-requested rule",
source: "rules-block",
rule: "Use consistent formatting",
alwaysApply: false,
ruleFile: "/path/to/agent-rule1.md",
},
{
name: "Agent Rule 2",
description: "Second agent-requested rule",
source: "default-agent",
rule: "Follow naming conventions",
alwaysApply: false,
ruleFile: "/path/to/agent-rule2.md",
},
// These should be filtered out
{
name: "Always Apply Rule",
description: "This rule always applies",
source: "default-chat",
rule: "Always use semicolons",
alwaysApply: true,
ruleFile: "/path/to/rule1.md",
},
{
name: "Rule with Globs",
description: "This rule has globs",
source: "rules-block",
rule: "Use TypeScript",
globs: "**/*.ts",
ruleFile: "/path/to/rule2.md",
},
];
const result = getRequestRuleDescription(rules);
const expected =
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n" +
"Agent Rule 1: First agent-requested rule\n" +
"Agent Rule 2: Second agent-requested rule";
expect(result).toBe(expected);
});
it("should handle rules with missing description", () => {
const rules: RuleWithSource[] = [
{
name: "Rule Without Description",
source: "rules-block",
rule: "Some rule content",
alwaysApply: false,
ruleFile: "/path/to/no-desc-rule.md",
},
];
const result = getRequestRuleDescription(rules);
const expected =
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n" +
"Rule Without Description: undefined";
expect(result).toBe(expected);
});
it("should handle rules with missing name", () => {
const rules: RuleWithSource[] = [
{
description: "Rule without name",
source: "rules-block",
rule: "Some rule content",
alwaysApply: false,
ruleFile: "/path/to/no-name-rule.md",
},
];
const result = getRequestRuleDescription(rules);
const expected =
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n" +
"undefined: Rule without name";
expect(result).toBe(expected);
});
it("should handle rules with missing ruleFile", () => {
const rules: RuleWithSource[] = [
{
name: "Rule Without File",
description: "Rule without file path",
source: "rules-block",
rule: "Some rule content",
alwaysApply: false,
},
];
const result = getRequestRuleDescription(rules);
const expected =
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n" +
"Rule Without File: Rule without file path";
expect(result).toBe(expected);
});
it("should filter out rules with alwaysApply undefined (truthy)", () => {
const rules: RuleWithSource[] = [
{
name: "Rule with undefined alwaysApply",
description: "This should be filtered out",
source: "rules-block",
rule: "Some rule",
ruleFile: "/path/to/rule.md",
// alwaysApply is undefined, which is truthy in the filter condition
},
{
name: "Valid Agent Rule",
description: "This should be included",
source: "rules-block",
rule: "Some rule",
alwaysApply: false,
ruleFile: "/path/to/valid-rule.md",
},
];
const result = getRequestRuleDescription(rules);
const expected =
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n" +
"Valid Agent Rule: This should be included";
expect(result).toBe(expected);
});
it("should filter out rules with any globs defined", () => {
const rules: RuleWithSource[] = [
{
name: "Rule with string globs",
description: "This should be filtered out",
source: "rules-block",
rule: "Some rule",
alwaysApply: false,
globs: "**/*.ts",
ruleFile: "/path/to/rule1.md",
},
{
name: "Rule with array globs",
description: "This should also be filtered out",
source: "rules-block",
rule: "Some rule",
alwaysApply: false,
globs: ["**/*.ts", "**/*.js"],
ruleFile: "/path/to/rule2.md",
},
{
name: "Valid Agent Rule",
description: "This should be included",
source: "rules-block",
rule: "Some rule",
alwaysApply: false,
ruleFile: "/path/to/valid-rule.md",
},
];
const result = getRequestRuleDescription(rules);
const expected =
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n" +
"Valid Agent Rule: This should be included";
expect(result).toBe(expected);
});
it("should handle empty rules array", () => {
const rules: RuleWithSource[] = [];
const result = getRequestRuleDescription(rules);
expect(result).toBe(
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\nNo rules available.",
);
});
it("should handle single agent-requested rule", () => {
const rules: RuleWithSource[] = [
{
name: "Single Rule",
description: "The only agent-requested rule",
source: "rules-block",
rule: "Follow this guideline",
alwaysApply: false,
ruleFile: "/path/to/single-rule.md",
},
];
const result = getRequestRuleDescription(rules);
const expected =
"Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n" +
"Single Rule: The only agent-requested rule";
expect(result).toBe(expected);
});
});

View File

@ -0,0 +1,52 @@
import { ConfigDependentToolParams, GetTool } from "../..";
import { BUILT_IN_GROUP_NAME, BuiltInToolNames } from "../builtIn";
export interface RequestRuleArgs {
name: string;
}
/**
 * Builds the tool description listing all "agent-requested" rules — those
 * with alwaysApply explicitly false and no glob patterns — as "name: description"
 * lines. Falls back to a "No rules available." message when none qualify.
 */
export function getRequestRuleDescription(
  rules: ConfigDependentToolParams["rules"],
): string {
  const header =
    "Use this tool to select additional rules, specifically based on their descriptions. Available rules:\n";
  // Must be explicitly false and no globs
  const lines: string[] = [];
  for (const rule of rules) {
    if (rule.alwaysApply === false && !rule.globs) {
      lines.push(`${rule.name}: ${rule.description}`);
    }
  }
  if (lines.length === 0) {
    return header + "No rules available.";
  }
  return header + lines.join("\n");
}
// Factory for the "Request Rules" built-in tool. The tool description is
// computed from the current config's rules so the model can see which
// agent-requested rules it may pull in by name.
export const requestRuleTool: GetTool = ({ rules }) => {
  const description = getRequestRuleDescription(rules);
  return {
    type: "function",
    displayTitle: "Request Rules",
    wouldLikeTo: "request rule {{{ name }}}",
    isCurrently: "reading rule {{{ name }}}",
    hasAlready: "read rule {{{ name }}}",
    group: BUILT_IN_GROUP_NAME,
    readonly: false,
    function: {
      name: BuiltInToolNames.RequestRule,
      description,
      parameters: {
        type: "object",
        required: ["name"],
        properties: {
          name: {
            type: "string",
            description: "Name of the rule",
          },
        },
      },
    },
  };
};

View File

@ -1,6 +1,27 @@
import { Tool } from "../..";
import { BUILT_IN_GROUP_NAME, BuiltInToolNames } from "../builtIn";
import os from "os";
/**
* Get the preferred shell for the current platform
* @returns The preferred shell command or path
*/
/**
 * Get the preferred shell for the current platform.
 * @returns The preferred shell command or path (from COMSPEC/SHELL env vars,
 *          with per-platform fallbacks).
 */
export function getPreferredShell(): string {
  const platform = os.platform();
  if (platform === "win32") {
    // %COMSPEC% names the command interpreter on Windows; cmd.exe is the safe default.
    return process.env.COMSPEC || "cmd.exe";
  } else if (platform === "darwin") {
    // macOS has defaulted to zsh since Catalina.
    return process.env.SHELL || "/bin/zsh";
  } else {
    // Linux and other Unix-like systems
    return process.env.SHELL || "/bin/bash";
  }
}

// BUG FIX: os.platform and os.arch are functions and must be *called* —
// interpolating the bare references embeds the function source text
// ("function platform() { [native code] }") in the prompt instead of the values.
export const PLATFORM_INFO = `Choose terminal commands and scripts optimized for ${os.platform()} and ${os.arch()} and shell ${getPreferredShell()}.`;
export const runTerminalCommandTool: Tool = {
type: "function",
displayTitle: "Run Terminal Command",
@ -11,12 +32,12 @@ export const runTerminalCommandTool: Tool = {
group: BUILT_IN_GROUP_NAME,
function: {
name: BuiltInToolNames.RunTerminalCommand,
description:
"Run a terminal command in the current directory.\
description: `Run a terminal command in the current directory.\
The shell is not stateful and will not remember any previous commands.\
When a command is run in the background ALWAYS suggest using shell commands to stop it; NEVER suggest using Ctrl+C.\
When suggesting subsequent shell commands ALWAYS format them in shell command blocks.\
Do NOT perform actions requiring special/admin privileges.",
Do NOT perform actions requiring special/admin privileges.\
${PLATFORM_INFO}`,
parameters: {
type: "object",
required: ["command"],

View File

@ -93,4 +93,22 @@ describe("createRuleBlockImpl", () => {
expect(markdown).toContain("# Complete Rule");
expect(markdown).toContain("Follow this standard");
});
it("should create a rule with alwaysApply set to false", async () => {
const args = {
name: "Conditional Rule",
rule: "This rule should not always be applied",
alwaysApply: false,
};
await createRuleBlockImpl(args, mockExtras as any);
const fileContent = mockIde.writeFile.mock.calls[0][1];
const { frontmatter } = parseMarkdownRule(fileContent);
expect(frontmatter).toEqual({
alwaysApply: false,
});
});
});

View File

@ -1,13 +1,14 @@
import * as YAML from "yaml";
import { ToolImpl } from ".";
import { RuleWithSource } from "../..";
import { RuleFrontmatter } from "../../config/markdown/parseMarkdownRule";
import { joinPathsToUri } from "../../util/uri";
export interface CreateRuleBlockArgs {
name: string;
rule: string;
description: string;
globs?: string;
}
export type CreateRuleBlockArgs = Pick<
Required<RuleWithSource>,
"rule" | "description" | "alwaysApply" | "name"
> &
Pick<RuleWithSource, "globs">;
export const createRuleBlockImpl: ToolImpl = async (
args: CreateRuleBlockArgs,
@ -21,15 +22,21 @@ export const createRuleBlockImpl: ToolImpl = async (
const fileExtension = "md";
const frontmatter: Record<string, string> = {};
const frontmatter: RuleFrontmatter = {};
if (args.globs) {
frontmatter.globs = args.globs.trim();
frontmatter.globs =
typeof args.globs === "string" ? args.globs.trim() : args.globs;
}
if (args.description) {
frontmatter.description = args.description.trim();
}
if (args.alwaysApply !== undefined) {
frontmatter.alwaysApply = args.alwaysApply;
}
const frontmatterYaml = YAML.stringify(frontmatter).trim();
let fileContent = `---
${frontmatterYaml}

View File

@ -0,0 +1,28 @@
import { ToolImpl } from ".";
import { parseMarkdownRule } from "../../config/markdown/parseMarkdownRule";
export const requestRuleImpl: ToolImpl = async (args, extras) => {
// Find the rule by name in the config
const rule = extras.config.rules.find((r) => r.name === args.name);
if (!rule || !rule.ruleFile) {
throw new Error(
`Rule with name "${args.name}" not found or has no file path`,
);
}
const fileContent = await extras.ide.readFile(rule.ruleFile);
const { markdown, frontmatter } = parseMarkdownRule(fileContent);
return [
{
name: frontmatter.name ?? "",
description: frontmatter.description ?? "",
content: markdown,
uri: {
type: "file",
value: rule.ruleFile,
},
},
];
};

View File

@ -1,12 +1,15 @@
import { ToolImpl } from ".";
import { getDiffsFromCache } from "../../autocomplete/snippets/gitDiffCache";
export const viewDiffImpl: ToolImpl = async (args, extras) => {
const diff = await extras.ide.getDiff(true);
const diffs = await getDiffsFromCache(extras.ide); // const diffs = await extras.ide.getDiff(true);
// TODO includeUnstaged should be an option
return [
{
name: "Diff",
description: "The current git diff",
content: diff.join("\n"),
content: diffs.join("\n"),
},
];
};

View File

@ -1,3 +1,4 @@
import { ConfigDependentToolParams, Tool } from "..";
import { createNewFileTool } from "./definitions/createNewFile";
import { createRuleBlock } from "./definitions/createRuleBlock";
import { editFileTool } from "./definitions/editFile";
@ -6,11 +7,12 @@ import { grepSearchTool } from "./definitions/grepSearch";
import { lsTool } from "./definitions/lsTool";
import { readCurrentlyOpenFileTool } from "./definitions/readCurrentlyOpenFile";
import { readFileTool } from "./definitions/readFile";
import { requestRuleTool } from "./definitions/requestRule";
import { runTerminalCommandTool } from "./definitions/runTerminalCommand";
import { searchWebTool } from "./definitions/searchWeb";
import { viewDiffTool } from "./definitions/viewDiff";
export const allTools = [
export const baseToolDefinitions = [
readFileTool,
editFileTool,
createNewFileTool,
@ -26,3 +28,7 @@ export const allTools = [
// viewSubdirectoryTool,
// viewRepoMapTool,
];
export const getConfigDependentToolDefinitions = (
params: ConfigDependentToolParams,
): Tool[] => [requestRuleTool(params)];

View File

@ -17,7 +17,6 @@ import {
Thread,
ToastType,
} from "../index.js";
import { GetGhTokenArgs } from "../protocol/ide.js";
class FileSystemIde implements IDE {
constructor(private readonly workspaceDir: string) {}
@ -56,9 +55,6 @@ class FileSystemIde implements IDE {
pauseCodebaseIndexOnStart: false,
};
}
async getGitHubAuthToken(args: GetGhTokenArgs): Promise<string | undefined> {
return undefined;
}
async getFileStats(fileUris: string[]): Promise<FileStatsMap> {
const result: FileStatsMap = {};
for (const uri of fileUris) {

5
core/vitest.config.ts Normal file
View File

@ -0,0 +1,5 @@
import { defineConfig } from "vitest/config";
export default defineConfig({
test: {},
});

View File

@ -7,14 +7,12 @@ sidebar_position: 2
Agent can be used with models that support tool use through Continue.
For the best Agent experience we recommend Claude 3.7 Sonnet from Anthropic. Add Claude 3.7 Sonnet to your assistant from the hub [here](https://hub.continue.dev/anthropic/claude-3-7-sonnet). You can get an API key from the [Anthropic console](https://console.anthropic.com/settings/keys).
For the best Agent experience we recommend Claude Sonnet 4 from Anthropic. Add Claude 3.7 Sonnet to your assistant from the hub [here](https://hub.continue.dev/anthropic/claude-4-sonnet). You can get an API key from the [Anthropic console](https://console.anthropic.com/settings/keys).
Currently the following providers are supported:
Currently the following models are recommended:
- [Anthropic](../customize/model-providers/top-level/anthropic.mdx) - we recommend [Claude Sonnet 3.7](https://hub.continue.dev/anthropic/claude-3-7-sonnet)
- [Ollama](../customize/model-providers/top-level/ollama.mdx) - we recommend [Qwen 2.5 Coder 7b](https://hub.continue.dev/ollama/qwen2.5-coder-7b). Also see all Ollama models that support tools [here](https://ollama.com/search?c=tools).
- [Anthropic](../customize/model-providers/top-level/anthropic.mdx) - we recommend [Claude Sonnet 4](https://hub.continue.dev/anthropic/claude-4-sonnet)
- [OpenAI](../customize/model-providers/top-level/openai.mdx) - we recommend [GPT-4o](https://hub.continue.dev/openai/gpt-4o)
- [Gemini](../customize/model-providers/top-level/gemini.mdx) - we recommend [Gemini 2.0 Flash](https://hub.continue.dev/google/gemini-2.0-flash)
- [Mistral](../customize/model-providers/top-level/mistral.mdx) - we recommend [Codestral](https://hub.continue.dev/mistral/codestral)
- [Gemini](../customize/model-providers/top-level/gemini.mdx) - we recommend [Gemini 2.5 Pro](https://hub.continue.dev/google/gemini-2.5-pro)
The best chat models generally support tool usage. See [recommended chat models](../customize/model-roles/chat.mdx#recommended-chat-models) for more.
The best chat models generally support tool use. See [recommended chat models](../customize/model-roles/chat.mdx#recommended-chat-models) for more.

View File

@ -13,6 +13,10 @@ Currently custom tools can be configured using the [Model Context Protocol](http
MCP Servers can be added to hub Assistants using `mcpServers` blocks. You can explore available MCP server blocks [here](https://hub.continue.dev/explore/mcp).
:::info
MCP can only be used in the **agent** mode.
:::
To set up your own MCP server, read the [MCP quickstart](https://modelcontextprotocol.io/quickstart) and then [create an `mcpServers` block](https://hub.continue.dev/new?type=block&blockType=mcpServers) or add the following to your [config file](./configuration.md):
<Tabs groupId="config-example">

View File

@ -27,7 +27,7 @@ For example, you can say "Create a rule for this", and a rule will be created fo
### Syntax
Rules blocks can be simple text, or have the following properties:
Rules blocks can be simple text, written in YAML configuration files, or as Markdown (`.md`) files. They can have the following properties:
- `name` (**required**): A display name/title for the rule
- `rule` (**required**): The text content of the rule

View File

@ -12,6 +12,18 @@ Morph provides a fast apply model that helps you quickly and accurately apply co
- uses: morphllm/morph-v0
with:
MORPH_API_KEY: ${{ secrets.MORPH_API_KEY }}
```
or
```yaml title="config.yaml"
- name: Morph Fast Apply
provider: openai
model: morph-v2
apiKey: <YOUR_MORPH_API_KEY>
apiBase: https://api.morphllm.com/v1/
roles:
- apply
promptTemplates:
apply: "<code>{{{ original_code }}}</code>\n<update>{{{ new_code }}}</update>"
```
</TabItem>
<TabItem value="json" label="JSON">
@ -21,10 +33,10 @@ Morph provides a fast apply model that helps you quickly and accurately apply co
{
"title": "Morph Fast Apply",
"provider": "openai",
"model": "morph-v0",
"model": "morph-v2",
"apiKey": "<YOUR_MORPH_API_KEY>",
"apiBase": "https://api.morphllm.com/v1/",
"roles": ["apply", "chat"],
"roles": ["apply"],
"promptTemplates": {
"apply": "<code>{{{ original_code }}}</code>\n<update>{{{ new_code }}}</update>"
}
@ -34,3 +46,67 @@ Morph provides a fast apply model that helps you quickly and accurately apply co
```
</TabItem>
</Tabs>
## Embeddings model
We recommend configuring **morph-embedding-v2** as your embeddings model.
<Tabs groupId="config-example">
<TabItem value="yaml" label="YAML">
```yaml title="config.yaml"
models:
- name: Morph Embeddings
provider: openai
model: morph-embedding-v2
apiKey: <YOUR_MORPH_API_KEY>
apiBase: https://api.morphllm.com/v1/
roles:
- embed
```
</TabItem>
<TabItem value="json" label="JSON">
```json title="config.json"
{
"embeddingsProvider": {
"provider": "openai",
"model": "morph-embedding-v2",
"apiKey": "<YOUR_MORPH_API_KEY>",
"apiBase": "https://api.morphllm.com/v1/"
}
}
```
</TabItem>
</Tabs>
## Reranking model
We recommend configuring **morph-rerank-v2** as your reranking model.
<Tabs groupId="config-example">
<TabItem value="yaml" label="YAML">
```yaml title="config.yaml"
models:
- name: Morph Reranker
provider: cohere
model: morph-rerank-v2
apiKey: <YOUR_MORPH_API_KEY>
apiBase: https://api.morphllm.com/v1/
roles:
- rerank
```
</TabItem>
<TabItem value="json" label="JSON">
```json title="config.json"
{
"reranker": {
"name": "cohere",
"params": {
"model": "morph-rerank-v2",
"apiKey": "<YOUR_MORPH_API_KEY>",
"apiBase": "https://api.morphllm.com/v1/"
}
}
}
```
</TabItem>
</Tabs>

View File

@ -20,7 +20,7 @@
<option value="runIde"/>
</list>
</option>
<option name="vmOptions"/>
<option name="vmOptions" value="-Dide.browser.jcef.out-of-process.enabled=false"/>
</ExternalSystemSettings>
<ExternalSystemDebugServerProcess>true</ExternalSystemDebugServerProcess>
<ExternalSystemReattachDebugProcess>true</ExternalSystemReattachDebugProcess>

View File

@ -19,7 +19,7 @@
<!-- <option value="runInspections" />-->
<!-- </list>-->
<!-- </option>-->
<!-- <option name="vmOptions" />-->
<!-- <option name="vmOptions" value="-Dide.browser.jcef.out-of-process.enabled=false"/>-->
<!-- </ExternalSystemSettings>-->
<!-- <ExternalSystemDebugServerProcess>true</ExternalSystemDebugServerProcess>-->
<!-- <ExternalSystemReattachDebugProcess>true</ExternalSystemReattachDebugProcess>-->

View File

@ -13,7 +13,7 @@
<option value="runIdeForUiTests"/>
</list>
</option>
<option name="vmOptions"/>
<option name="vmOptions" value="-Dide.browser.jcef.out-of-process.enabled=false"/>
</ExternalSystemSettings>
<ExternalSystemDebugServerProcess>true</ExternalSystemDebugServerProcess>
<ExternalSystemReattachDebugProcess>true</ExternalSystemReattachDebugProcess>

View File

@ -13,7 +13,7 @@
<option value="test" />
</list>
</option>
<option name="vmOptions" />
<option name="vmOptions" value="-Dide.browser.jcef.out-of-process.enabled=false"/>
</ExternalSystemSettings>
<ExternalSystemDebugServerProcess>true</ExternalSystemDebugServerProcess>
<ExternalSystemReattachDebugProcess>true</ExternalSystemReattachDebugProcess>

View File

@ -3,7 +3,7 @@ pluginGroup=com.github.continuedev.continueintellijextension
pluginName=continue-intellij-extension
pluginRepositoryUrl=https://github.com/continuedev/continue
# SemVer format -> https://semver.org
pluginVersion=1.0.18
pluginVersion=1.0.21
# Supported build number ranges and IntelliJ Platform versions -> https://plugins.jetbrains.com/docs/intellij/build-number-ranges.html
pluginSinceBuild=223
# IntelliJ Platform Properties -> https://plugins.jetbrains.com/docs/intellij/tools-gradle-intellij-plugin.html#configuration-intellij-extension

View File

@ -182,8 +182,6 @@ class ContinuePluginStartupActivity : StartupActivity, DumbAware {
// Notify core of content changes
if (changedURIs.isNotEmpty()) {
continuePluginService.updateLastFileSaveTimestamp()
val data = mapOf("uris" to changedURIs)
continuePluginService.coreMessenger?.request("files/changed", data, null) { _ -> }
}

View File

@ -36,8 +36,6 @@ class MessageTypes {
"getFileStats",
"insertAtCursor",
"applyToFile",
"getGitHubAuthToken",
"setGitHubAuthToken",
"getControlPlaneSessionInfo",
"logoutOfControlPlane",
"getTerminalContents",

View File

@ -132,18 +132,18 @@ class GetTheme {
val inputPlaceholder = namedColor("TextField.inactiveForeground")
val listHoverBackground = namedColor("List.hoverBackground") ?: slightChange(background)
val actionHoverBackground = namedColor("ActionButton.hoverBackground") ?: namedColor("Button.darcula.hoverBackground")
val listHoverForeground = namedColor("List.hoverForeground") ?: slightChange(foreground)
val tableOddRow = namedColor("Table.hoverBackground") ?: namedColor("Table.stripeColor")
val listSelectionForeground = namedColor("List.selectionForeground")
val tableOddRow = namedColor("Table.hoverBackground") ?: namedColor("Table.stripeColor")
val description = namedColor("Label.disabledForeground") ?: namedColor("Label.infoForeground")
val description = namedColor("Label.infoForeground") ?: namedColor("Label.disabledForeground") ?: foreground
val mutedDescription = namedColor("Component.infoForeground")
?: namedColor("ContextHelp.foreground")
?: namedColor("TextField.placeholderForeground")
?: namedColor("Label.disabledForeground")
?: namedColor("ToolTip.foreground")
?: description
val link = namedColor("Link.activeForeground")
@ -201,8 +201,8 @@ class GetTheme {
"find-match" to findMatchBackground,
"find-match-selected" to findMatchSelectedBackground,
"list-hover" to listHoverBackground,
"list-active" to actionHoverBackground,
"list-active-foreground" to listSelectionForeground
"list-active" to listHoverBackground,
"list-active-foreground" to listHoverForeground
).mapValues { toHex(it.value) }
return theme
} catch (error: Error) {

View File

@ -13,33 +13,12 @@ class GitService(
private val project: Project,
private val continuePluginService: ContinuePluginService
) {
// Add a simple cache for diff results
private data class DiffCache(
val timestamp: Long,
val diffs: List<String>
)
// Cache the last diff result
private var diffCache: DiffCache? = null
private var lastFileSaveTimestamp: Long = System.currentTimeMillis()
/**
* Updates the timestamp when a file is saved
*/
fun updateLastFileSaveTimestamp() {
lastFileSaveTimestamp = System.currentTimeMillis()
}
/**
* Returns the git diff for all workspace directories
*/
suspend fun getDiff(includeUnstaged: Boolean): List<String> {
// Check if we have a valid cache entry
if (diffCache != null && diffCache!!.timestamp == lastFileSaveTimestamp) {
return diffCache!!.diffs
}
// If no cache hit, compute the diff
val workspaceDirs = workspaceDirectories()
val diffs = mutableListOf<String>()
@ -74,8 +53,6 @@ class GitService(
diffs.add(output.toString())
}
// Cache the result
diffCache = DiffCache(lastFileSaveTimestamp, diffs)
return diffs
}

View File

@ -50,10 +50,6 @@ class IdeProtocolClient(
)
}
fun updateLastFileSaveTimestamp() {
(ide as IntelliJIDE).updateLastFileSaveTimestamp()
}
fun handleMessage(msg: String, respond: (Any?) -> Unit) {
coroutineScope.launch(limitedDispatcher) {
val message = Gson().fromJson(msg, Message::class.java)
@ -388,33 +384,6 @@ class IdeProtocolClient(
respond(pinnedFiles)
}
"getGitHubAuthToken" -> {
val params = Gson().fromJson(
dataElement.toString(),
GetGhTokenArgs::class.java
)
val ghAuthToken = ide.getGitHubAuthToken(params)
if (ghAuthToken == null) {
// Open a dialog so user can enter their GitHub token
continuePluginService.sendToWebview("openOnboardingCard", null, uuid())
respond(null)
} else {
respond(ghAuthToken)
}
}
"setGitHubAuthToken" -> {
val params = Gson().fromJson(
dataElement.toString(),
SetGitHubAuthTokenParams::class.java
)
val continueSettingsService = service<ContinueExtensionSettings>()
continueSettingsService.continueState.ghAuthToken = params.token
respond(null)
}
"openUrl" -> {
val url = Gson().fromJson(
dataElement.toString(),
@ -504,10 +473,16 @@ class IdeProtocolClient(
val editor = EditorUtils.getEditor(project)
val rif = editor?.getHighlightedRIF() ?: return
val serializedRif = com.github.continuedev.continueintellijextension.RangeInFileWithContents(
filepath = rif.filepath,
range = rif.range,
contents = rif.contents
)
continuePluginService.sendToWebview(
"highlightedCode",
HighlightedCodePayload(
rangeInFileWithContents = rif,
rangeInFileWithContents = serializedRif,
shouldRun = edit
)
)

View File

@ -66,13 +66,6 @@ class IntelliJIDE(
}
}
/**
* Updates the timestamp when a file is saved
*/
override fun updateLastFileSaveTimestamp() {
gitService.updateLastFileSaveTimestamp()
}
override suspend fun getIdeInfo(): IdeInfo {
val applicationInfo = ApplicationInfo.getInstance()
val ideName: String = applicationInfo.fullApplicationName
@ -576,11 +569,6 @@ class IntelliJIDE(
}
}
override suspend fun getGitHubAuthToken(args: GetGhTokenArgs): String? {
val continueSettingsService = service<ContinueExtensionSettings>()
return continueSettingsService.continueState.ghAuthToken
}
override suspend fun gotoDefinition(location: Location): List<RangeInFile> {
throw NotImplementedError("gotoDefinition not implemented yet")
}

View File

@ -4,10 +4,6 @@ data class CopyTextParams(
val text: String
)
data class SetGitHubAuthTokenParams(
val token: String
)
data class ApplyToFileParams(
val text: String,
val streamId: String,

View File

@ -91,7 +91,6 @@ open class ContinueExtensionSettings : PersistentStateComponent<ContinueExtensio
var remoteConfigSyncPeriod: Int = 60
var userToken: String? = null
var enableTabAutocomplete: Boolean = true
var ghAuthToken: String? = null
var enableOSR: Boolean = shouldRenderOffScreen()
var displayEditorTooltip: Boolean = true
var showIDECompletionSideBySide: Boolean = false

View File

@ -63,8 +63,4 @@ class ContinuePluginService : Disposable, DumbAware {
listener()
}
}
fun updateLastFileSaveTimestamp() {
ideProtocolClient?.updateLastFileSaveTimestamp()
}
}

View File

@ -187,23 +187,13 @@ interface IDE {
suspend fun getFileStats(files: List<String>): Map<String, FileStats>
suspend fun getGitHubAuthToken(args: GetGhTokenArgs): String?
// LSP
suspend fun gotoDefinition(location: Location): List<RangeInFile>
// Callbacks
fun onDidChangeActiveTextEditor(callback: (filepath: String) -> Unit)
fun updateLastFileSaveTimestamp() {
// Default implementation does nothing
}
}
data class GetGhTokenArgs(
val force: String?
)
data class Message(
val messageType: String,
val messageId: String,
@ -232,7 +222,7 @@ data class ApplyState(
)
data class HighlightedCodePayload(
val rangeInFileWithContents: RangeInFileWithContents,
val rangeInFileWithContents: com.github.continuedev.continueintellijextension.RangeInFileWithContents,
val prompt: String? = null,
val shouldRun: Boolean? = null
)

View File

@ -187,7 +187,6 @@
"description": "The provider of the model. This is used to determine the type of model, and how to interact with it.",
"enum": [
"openai",
"free-trial",
"anthropic",
"anthropic-vertexai",
"cohere",
@ -237,7 +236,6 @@
],
"markdownEnumDescriptions": [
"### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/openai)",
"### Free Trial\nNew users can try out Continue for free using a proxy server that securely makes calls to OpenAI using our API key. If you are ready to use your own API key or have used all 250 free uses, you can enter your API key in config.json where it says `apiKey=\"\"` or select another model provider.\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/freetrial)",
"### Anthropic\nTo get started with Anthropic models, you first need to sign up for the open beta [here](https://claude.ai/login) to obtain an API key.\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/anthropicllm)",
"### Anthropic Vertex AI\nTo get started you need to enable the [Vertex AI API](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) and set up the [Google Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc).",
"### Cohere\nTo use Cohere, visit the [Cohere dashboard](https://dashboard.cohere.com/api-keys) to create an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/cohere)",
@ -736,33 +734,6 @@
}
}
},
{
"if": {
"properties": {
"provider": {
"enum": ["free-trial"]
}
},
"required": ["provider"]
},
"then": {
"properties": {
"model": {
"enum": [
"gpt-4o",
"codestral-latest",
"llama3.1-70b",
"llama3.1-405b",
"gpt-3.5-turbo",
"gemini-pro",
"claude-3-5-sonnet-latest",
"claude-3-haiku-20240307",
"AUTODETECT"
]
}
}
}
},
{
"if": {
"properties": {
@ -2727,10 +2698,10 @@
"x-intellij-html-description": "Learn about setting up models in <a href='https://docs.continue.dev/model-setup/overview'>the documentation</a>.",
"default": [
{
"title": "GPT-4 (trial)",
"provider": "free-trial",
"model": "gpt-4",
"apiKey": ""
"title": "Claude 3.7 Sonnet",
"provider": "anthropic",
"model": "claude-3-7-sonnet-latest",
"apiKey": "sk_..."
}
],
"type": "array",
@ -2838,7 +2809,6 @@
"ollama",
"openai",
"cohere",
"free-trial",
"continue-proxy",
"gemini",
"voyage",
@ -2974,7 +2944,6 @@
"voyage",
"watsonx",
"llm",
"free-trial",
"huggingface-tei",
"siliconflow"
]
@ -3498,6 +3467,20 @@
}
},
"required": ["type", "url"]
},
{
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": ["streamable-http"]
},
"url": {
"type": "string",
"format": "uri"
}
},
"required": ["type", "url"]
}
]
}

View File

@ -1,12 +1,12 @@
{
"name": "continue",
"version": "1.1.39",
"version": "1.1.40",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "continue",
"version": "1.1.39",
"version": "1.1.40",
"license": "Apache-2.0",
"dependencies": {
"@continuedev/config-types": "^1.0.14",
@ -110,7 +110,7 @@
"@continuedev/fetch": "^1.0.10",
"@continuedev/llm-info": "^1.0.8",
"@continuedev/openai-adapters": "^1.0.25",
"@modelcontextprotocol/sdk": "^1.5.0",
"@modelcontextprotocol/sdk": "^1.12.0",
"@mozilla/readability": "^0.5.0",
"@octokit/rest": "^20.1.1",
"@typescript-eslint/eslint-plugin": "^7.8.0",

View File

@ -2,7 +2,7 @@
"name": "continue",
"icon": "media/icon.png",
"author": "Continue Dev, Inc",
"version": "1.1.39",
"version": "1.1.41",
"repository": {
"type": "git",
"url": "https://github.com/continuedev/continue"

Some files were not shown because too many files have changed in this diff Show More