Merge branch 'main' into speedup-builds

commit 8f7be11657

@@ -1,16 +1,111 @@
---
globs: docs/**/*.{md,mdx}
description: Standards for writing and maintaining Continue Docs
globs: docs/**/*.{md,mdx}
description: This style guide should be used as a reference for maintaining consistency across all Continue documentation
alwaysApply: false
---

# Continue Docs Standards
# Continue Documentation Style Guide

## Overview

## Writing Tone & Voice

### Conversational and Direct

- Follow Docusaurus documentation standards
- Include YAML frontmatter with title, description, and keywords
- Use consistent heading hierarchy starting with h2 (##)
- Include relevant Admonition components for tips, warnings, and info
- Use descriptive alt text for images
- Include cross-references to related documentation
- Reference other docs with relative paths
- Use simple, conversational language that gets straight to the point
- Avoid overly technical jargon when simpler terms work
- Write as if speaking directly to the developer using the tool
- Keep paragraphs concise and scannable
- Use code blocks with appropriate language tags

**Example:**
✅ "You send it a question, and it replies with an answer"
❌ "The system processes user queries and generates corresponding responses"

### Helpful and Instructional

- Focus on helping users accomplish their goals
- Use active voice and imperative mood for instructions
- Assume users want to get things done quickly
- Include relevant Admonition components for tips, warnings, and info

**Example:**
✅ "Press cmd/ctrl + L to begin a new session"
❌ "A new session can be initiated by pressing cmd/ctrl + L"

### Practical and Task-Oriented

- Emphasize what users can accomplish with each feature
- Lead with benefits and use cases before diving into mechanics
- Keep explanations grounded in real-world scenarios

## Content Structure

### Page Organization

1. **Visual Introduction**: Lead with GIFs or images showing the feature in action
2. **Purpose Statement**: Brief explanation of what the feature does and when to use it
3. **Step-by-Step Instructions**: Clear, actionable steps with keyboard shortcuts
4. **Platform-Specific Notes**: Separate sections for VS Code and JetBrains when needed
5. **Additional Tips**: Advanced usage or troubleshooting notes

### Section Headers

- Use consistent heading hierarchy starting with h2 (##)
- Include YAML frontmatter with title, description, and keywords (see the frontmatter sketch below)
- Use action-oriented headers that describe what users will do
- Format: "Verb + object" (e.g., "Type a request and press enter")
- Keep headers concise but descriptive
- Use title case

**Examples:**
✅ "Highlight code and activate"
✅ "Accept or reject changes"
✅ "Switch between different models"

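As a quick reference for the frontmatter bullet above, a minimal block might look like this (the field values are illustrative only, not taken from a real page):

```yaml
---
title: "Edit"
description: "How to edit code in place with Continue"
keywords: [edit, shortcuts, vscode, jetbrains]
---
```
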
### Lists and Steps

- Use numbered lists for sequential steps
- Use bullet points for feature lists or options
- Keep list items parallel in structure
- Start action items with verbs

## Technical Writing Standards

### Code and Keyboard Shortcuts

- Use `backticks` for inline code elements
- Format keyboard shortcuts consistently: `cmd/ctrl + L`
- Always provide shortcuts for Mac/Windows/Linux
- Use code blocks for configuration examples with proper syntax highlighting (see the sketch below)

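For instance, a fenced configuration block with an explicit language tag (the model entry shown is illustrative, not a recommendation):

```yaml
models:
  - name: GPT-4o
    provider: openai
    model: gpt-4o
```
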
### Cross-References

- Link to related sections using descriptive anchor text
- Use relative links to other documentation pages
- Format: `[descriptive text](/path/to/page)`

### Platform Differences

- Always address both VS Code and JetBrains when applicable
- Use clear subheadings to separate platform-specific instructions
- Lead with the more common platform (typically VS Code) when both are covered

## Language Conventions

### Terminology

- **Consistent Terms**: Use the same terms throughout (e.g., "LLM" not "AI model" in some places)
- **Product Names**: Capitalize product names correctly (VS Code, JetBrains, Continue)
- **Feature Names**: Use consistent capitalization for Continue features (Chat, Edit, Agent, Autocomplete)

### Abbreviations

- Spell out acronyms on first use, then use abbreviation consistently
- Common abbreviations: LLM, IDE, API, URL

### Pronouns

- Use "you" to address the user directly
- Use "it" to refer to the tool/model
- Avoid "we" unless referring to the Continue team

@@ -10,6 +10,7 @@
    "@typescript-eslint/naming-convention": "off",
    "@typescript-eslint/no-floating-promises": "warn",
    "@typescript-eslint/semi": "warn",
    "@typescript-eslint/no-misused-promises": "error",
    "curly": "warn",
    "eqeqeq": "warn",
    "no-throw-literal": "warn",

@@ -396,6 +396,11 @@ jobs:
          npx tsc --noEmit
          npm run lint

      - name: Run vitest tests
        run: |
          cd extensions/vscode
          npm run vitest

  core-tests:
    needs: [install-core, install-config-yaml]
    runs-on: ubuntu-latest

@@ -434,7 +439,7 @@ jobs:
      GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
      MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
      AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
      AZURE_FOUNDRY_API_KEY: ${{ secrets.AZURE_FOUNDRY_API_KEY }}
      AZURE_FOUNDRY_CODESTRAL_API_KEY: ${{ secrets.AZURE_FOUNDRY_CODESTRAL_API_KEY }}
      AZURE_FOUNDRY_MISTRAL_SMALL_API_KEY: ${{ secrets.AZURE_FOUNDRY_MISTRAL_SMALL_API_KEY }}
      AZURE_OPENAI_GPT41_API_KEY: ${{ secrets.AZURE_OPENAI_GPT41_API_KEY }}

@@ -20,6 +20,7 @@
    "posthog-node": "^3.6.3",
    "system-ca": "^1.0.2",
    "tar": "^7.4.3",
    "undici": "^7.10.0",
    "uuid": "^9.0.1",
    "vectordb": "^0.4.20",
    "win-ca": "^3.5.1"

@@ -54,7 +55,7 @@
    "@continuedev/config-yaml": "file:../packages/config-yaml",
    "@continuedev/fetch": "^1.0.13",
    "@continuedev/llm-info": "^1.0.8",
    "@continuedev/openai-adapters": "^1.0.25",
    "@continuedev/openai-adapters": "^1.0.32",
    "@modelcontextprotocol/sdk": "^1.12.0",
    "@mozilla/readability": "^0.5.0",
    "@octokit/rest": "^20.1.1",

@@ -105,6 +106,7 @@
    "sqlite3": "^5.1.7",
    "system-ca": "^1.0.3",
    "tar": "^7.4.3",
    "tree-sitter-structured-text": "^0.0.1",
    "tree-sitter-wasms": "^0.1.11",
    "uuid": "^9.0.1",
    "vectordb": "^0.4.20",

@@ -143,6 +145,7 @@
    "myers-diff": "^2.1.0",
    "onnxruntime-common": "1.14.0",
    "onnxruntime-web": "1.14.0",
    "tree-sitter-cli": "^0.22.5",
    "ts-jest": "^29.1.1",
    "typescript": "^5.6.3",
    "vitest": "^3.1.4"

@@ -6774,6 +6777,15 @@
        "node": ">=8"
      }
    },
    "node_modules/undici": {
      "version": "7.10.0",
      "resolved": "https://registry.npmmirror.com/undici/-/undici-7.10.0.tgz",
      "integrity": "sha512-u5otvFBOBZvmdjWLVW+5DAc9Nkq8f24g0O9oY7qw2JVIF1VocIFoyz9JFkuVOS2j41AufeO0xnlweJ2RLT8nGw==",
      "license": "MIT",
      "engines": {
        "node": ">=20.18.1"
      }
    },
    "node_modules/universal-user-agent": {
      "version": "6.0.1",
      "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.1.tgz",

@@ -53,6 +53,7 @@
    "posthog-node": "^3.6.3",
    "system-ca": "^1.0.2",
    "tar": "^7.4.3",
    "undici": "^7.10.0",
    "uuid": "^9.0.1",
    "vectordb": "^0.4.20",
    "win-ca": "^3.5.1"

@@ -4,6 +4,7 @@ const { rimrafSync } = require("rimraf");
const tar = require("tar");
const { RIPGREP_VERSION, TARGET_TO_RIPGREP_RELEASE } = require("./targets");
const AdmZip = require("adm-zip");
const { ProxyAgent } = require("undici");

const RIPGREP_BASE_URL = `https://github.com/BurntSushi/ripgrep/releases/download/${RIPGREP_VERSION}`;

@@ -16,8 +17,13 @@ const RIPGREP_BASE_URL = `https://github.com/BurntSushi/ripgrep/releases/downloa
 */
async function downloadFile(url, destPath) {
  // Use the built-in fetch API instead of node-fetch
  // Use proxy if set in environment variables
  const proxy = process.env.https_proxy || process.env.HTTPS_PROXY;
  const agent = proxy ? new ProxyAgent(proxy) : undefined;

  const response = await fetch(url, {
    redirect: "follow", // Automatically follow redirects
    dispatcher: agent,
  });

  if (!response.ok) {

@@ -1,3 +1,4 @@
import { describe, test } from "vitest";
import { PYTHON_TEST_CASES, TYPESCRIPT_TEST_CASES } from "./testCases";
import { testRootPathContext } from "./testUtils";

@@ -1,6 +1,6 @@
import { jest } from "@jest/globals";
import fs from "fs";
import path from "path";
import { expect, vi } from "vitest";

import Parser from "web-tree-sitter";
import { Position } from "../../../..";

@@ -36,17 +36,17 @@ export async function testRootPathContext(
  const importDefinitionsService = new ImportDefinitionsService(ide);
  const service = new RootPathContextService(importDefinitionsService, ide);

  const getSnippetsMock = jest
  const getSnippetsMock = vi
    // @ts-ignore
    .spyOn(service, "getSnippets")
    // @ts-ignore
    .mockImplementation(async (_filepath, _endPosition) => {
    .mockImplementation((_filePath, _endPosition) => {
      return [];
    });

  // Copy the folder to the test directory
  const folderPath = path.join(
    __dirname,
    process.cwd(),
    "autocomplete",
    "context",
    "root-path-context",

@@ -1,3 +1,4 @@
import { describe, expect, it } from "vitest";
import { stopAtStartOf, stopAtStopTokens } from "./charStream";

async function* createMockStream(chunks: string[]): AsyncGenerator<string> {

@@ -1,10 +1,10 @@
import { jest } from "@jest/globals";
import { beforeEach, describe, expect, it, Mock, vi } from "vitest";

import * as lineStream from "./lineStream";

// eslint-disable-next-line max-lines-per-function
describe("lineStream", () => {
  let mockFullStop: jest.Mock;
  let mockFullStop: Mock;

  async function getLineGenerator(lines: any) {
    return (async function* () {

@@ -25,7 +25,7 @@ describe("lineStream", () => {
  }

  beforeEach(() => {
    mockFullStop = jest.fn();
    mockFullStop = vi.fn();
  });

  describe("noTopLevelKeywordsMidline", () => {

@@ -1,4 +1,9 @@
import { setUpTestDir, tearDownTestDir } from "../../../test/testDir";
import { afterAll, beforeAll, beforeEach, describe, it } from "vitest";
import {
  addToTestDir,
  setUpTestDir,
  tearDownTestDir,
} from "../../../test/testDir";

import { TEST_CASES_WITH_DIFF, TEST_CASES_WITHOUT_DIFF } from "./testCases";
import {

@@ -18,6 +23,7 @@ describe("Autocomplete filtering tests", () => {
  beforeAll(async () => {
    tearDownTestDir();
    setUpTestDir();
    addToTestDir([".continueignore"]);
  });

  afterAll(async () => {

@@ -1,3 +1,4 @@
import { expect } from "vitest";
import MockLLM from "../../../llm/llms/Mock";
import { testConfigHandler, testIde } from "../../../test/fixtures";
import { joinPathsToUri } from "../../../util/uri";

@@ -1,4 +1,12 @@
import { jest } from "@jest/globals";
import {
  afterEach,
  beforeEach,
  describe,
  expect,
  Mock,
  test,
  vi,
} from "vitest";
import { GeneratorReuseManager } from "./GeneratorReuseManager";

function createMockGenerator(

@@ -14,7 +22,7 @@ function createMockGenerator(
      }
    }
  };
  const newGenerator = jest
  const newGenerator = vi
    .fn<() => AsyncGenerator<string>>()
    .mockReturnValue(mockGenerator());

@@ -23,15 +31,15 @@ function createMockGenerator(

describe("GeneratorReuseManager", () => {
  let reuseManager: GeneratorReuseManager;
  let onErrorMock: jest.Mock;
  let onErrorMock: Mock;

  beforeEach(() => {
    onErrorMock = jest.fn();
    onErrorMock = vi.fn();
    reuseManager = new GeneratorReuseManager(onErrorMock);
  });

  afterEach(() => {
    jest.clearAllMocks();
    vi.clearAllMocks();
  });

  test("creates new generator when there is no current generator", async () => {

@@ -169,7 +177,7 @@ describe("GeneratorReuseManager", () => {
    const mockGenerator = async function* () {
      throw error;
    };
    const newGenerator = jest
    const newGenerator = vi
      .fn<() => AsyncGenerator<string>>()
      .mockReturnValue(mockGenerator());

@@ -1,4 +1,4 @@
import { jest } from "@jest/globals";
import { describe, expect, it, vi } from "vitest";

import { ListenableGenerator } from "./ListenableGenerator";

@@ -16,7 +16,7 @@ describe("ListenableGenerator", () => {
  it("should yield values from the source generator via tee()", async () => {
    const values = [1, 2, 3];
    const source = asyncGenerator(values);
    const onError = jest.fn();
    const onError = vi.fn();

    const lg = new ListenableGenerator<number>(
      source,

@@ -36,7 +36,7 @@ describe("ListenableGenerator", () => {
  it("should allow listeners to receive values", async () => {
    const values = [1, 2, 3];
    const source = asyncGenerator(values, 10); // Introduce delay to simulate async behavior
    const onError = jest.fn();
    const onError = vi.fn();

    const lg = new ListenableGenerator<number>(
      source,

@@ -44,7 +44,7 @@ describe("ListenableGenerator", () => {
      new AbortController(),
    );

    const listener = jest.fn();
    const listener = vi.fn();

    // Add listener after some delay to simulate late subscription
    setTimeout(() => {

@@ -64,7 +64,7 @@ describe("ListenableGenerator", () => {
  it("should buffer values for listeners added after some values have been yielded", async () => {
    const values = [1, 2, 3];
    const source = asyncGenerator(values, 10);
    const onError = jest.fn();
    const onError = vi.fn();

    const lg = new ListenableGenerator<number>(
      source,

@@ -72,7 +72,7 @@ describe("ListenableGenerator", () => {
      new AbortController(),
    );

    const initialListener = jest.fn();
    const initialListener = vi.fn();

    lg.listen(initialListener);

@@ -80,7 +80,7 @@ describe("ListenableGenerator", () => {
    await new Promise((resolve) => setTimeout(resolve, 15));

    // Add a second listener
    const newListener = jest.fn();
    const newListener = vi.fn();
    lg.listen(newListener);

    // Wait for generator to finish

@@ -98,7 +98,7 @@ describe("ListenableGenerator", () => {
  it("should handle cancellation", async () => {
    const values = [1, 2, 3, 4, 5];
    const source = asyncGenerator(values, 10);
    const onError = jest.fn();
    const onError = vi.fn();

    const lg = new ListenableGenerator<number>(
      source,

@@ -131,7 +131,7 @@ describe("ListenableGenerator", () => {
    }

    const source = errorGenerator();
    const onError = jest.fn();
    const onError = vi.fn();

    const lg = new ListenableGenerator<number>(
      source,

@@ -152,7 +152,7 @@ describe("ListenableGenerator", () => {
  it("should notify listeners when the generator ends", async () => {
    const values = [1, 2, 3];
    const source = asyncGenerator(values);
    const onError = jest.fn();
    const onError = vi.fn();

    const lg = new ListenableGenerator<number>(
      source,

@@ -160,7 +160,7 @@ describe("ListenableGenerator", () => {
      new AbortController(),
    );

    const listener = jest.fn();
    const listener = vi.fn();
    lg.listen(listener);

    // Wait for the generator to finish

@@ -1,13 +1,13 @@
import { jest } from "@jest/globals";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { stopAfterMaxProcessingTime } from "./utils";

describe("stopAfterMaxProcessingTime", () => {
  beforeEach(() => {
    jest.useFakeTimers();
    vi.useFakeTimers();
  });

  afterEach(() => {
    jest.useRealTimers();
    vi.useRealTimers();
  });

  async function* createMockStream(chunks: string[]): AsyncGenerator<string> {

@@ -28,7 +28,7 @@ describe("stopAfterMaxProcessingTime", () => {

  it("should yield all chunks when maxTimeMs is not reached", async () => {
    const mockStream = createMockStream(["Hello", " world", "!"]);
    const fullStop = jest.fn();
    const fullStop = vi.fn();
    const result = stopAfterMaxProcessingTime(mockStream, 1000, fullStop);

    const output = await streamToString(result);

@@ -37,11 +37,11 @@ describe("stopAfterMaxProcessingTime", () => {
    expect(fullStop).not.toHaveBeenCalled();
  });

  it.only("should stop processing after max time is reached", async () => {
  it("should stop processing after max time is reached", async () => {
    // Mock implementation of Date.now
    let currentTime = 0;
    const originalDateNow = Date.now;
    Date.now = jest.fn(() => currentTime);
    Date.now = vi.fn(() => currentTime);

    // Create a generator that we can control
    async function* controlledGenerator(): AsyncGenerator<string> {

@@ -54,7 +54,7 @@ describe("stopAfterMaxProcessingTime", () => {
      }
    }

    const fullStop = jest.fn();
    const fullStop = vi.fn();
    const maxTimeMs = 500;

    const transformedGenerator = stopAfterMaxProcessingTime(

@@ -84,10 +84,10 @@ describe("stopAfterMaxProcessingTime", () => {
  it("should check time only periodically based on checkInterval", async () => {
    const chunks = Array(100).fill("x");
    const mockStream = createMockStream(chunks);
    const fullStop = jest.fn();
    const fullStop = vi.fn();

    // Spy on Date.now to count how many times it's called
    const dateSpy = jest.spyOn(Date, "now");
    const dateSpy = vi.spyOn(Date, "now");

    // Stream should complete normally (not hitting the timeout)
    await streamToString(

@@ -105,7 +105,7 @@ describe("stopAfterMaxProcessingTime", () => {

  it("should handle empty stream gracefully", async () => {
    const mockStream = createMockStream([]);
    const fullStop = jest.fn();
    const fullStop = vi.fn();
    const result = stopAfterMaxProcessingTime(mockStream, 1000, fullStop);

    const output = await streamToString(result);

@@ -117,7 +117,7 @@ describe("stopAfterMaxProcessingTime", () => {
  it("should pass through all chunks if there's no timeout", async () => {
    const chunks = Array(100).fill("test chunk");
    const mockStream = createMockStream(chunks);
    const fullStop = jest.fn();
    const fullStop = vi.fn();

    // Use undefined as timeout to simulate no timeout
    const result = stopAfterMaxProcessingTime(

@@ -1,3 +1,4 @@
import { beforeEach, expect, test, vi } from "vitest";
import { GitDiffCache } from "./gitDiffCache";

beforeEach(() => {

@@ -7,7 +8,7 @@ beforeEach(() => {

test("GitDiffCache returns cached results within cache time", async () => {
  const mockDiff = ["file1.ts", "file2.ts"];
  const getDiffFn = jest.fn().mockResolvedValue(mockDiff);
  const getDiffFn = vi.fn().mockResolvedValue(mockDiff);
  const cache = GitDiffCache.getInstance(getDiffFn, 1); // 1 second cache

  const result1 = await cache.get();

@@ -20,7 +21,7 @@ test("GitDiffCache returns cached results within cache time", async () => {

test("GitDiffCache refreshes cache after expiration", async () => {
  const mockDiff = ["file1.ts"];
  const getDiffFn = jest.fn().mockResolvedValue(mockDiff);
  const getDiffFn = vi.fn().mockResolvedValue(mockDiff);
  const cache = GitDiffCache.getInstance(getDiffFn, 0.1); // 100ms cache

  const result1 = await cache.get();

@@ -31,7 +32,7 @@ test("GitDiffCache refreshes cache after expiration", () => {
});

test("GitDiffCache returns empty array on error", async () => {
  const getDiffFn = jest.fn().mockRejectedValue(new Error("Git error"));
  const getDiffFn = vi.fn().mockRejectedValue(new Error("Git error"));
  const cache = GitDiffCache.getInstance(getDiffFn);

  const result = await cache.get();

@@ -41,7 +42,7 @@ test("GitDiffCache returns empty array on error", async () => {
test("GitDiffCache reuses pending request", async () => {
  const mockDiff = ["file1.ts"];
  let resolvePromise: (value: string[]) => void;
  const getDiffFn = jest.fn().mockImplementation(() => {
  const getDiffFn = vi.fn().mockImplementation(() => {
    return new Promise((resolve) => {
      resolvePromise = resolve;
    });

@@ -63,7 +64,7 @@ test("GitDiffCache reuses pending request", async () => {

test("GitDiffCache invalidate clears cache", async () => {
  const mockDiff = ["file1.ts"];
  const getDiffFn = jest.fn().mockResolvedValue(mockDiff);
  const getDiffFn = vi.fn().mockResolvedValue(mockDiff);
  const cache = GitDiffCache.getInstance(getDiffFn);

  await cache.get();

@@ -74,8 +75,8 @@ test("GitDiffCache invalidate clears cache", async () => {
});

test("GitDiffCache maintains singleton instance", () => {
  const getDiffFn1 = jest.fn();
  const getDiffFn2 = jest.fn();
  const getDiffFn1 = vi.fn();
  const getDiffFn2 = vi.fn();

  const instance1 = GitDiffCache.getInstance(getDiffFn1);
  const instance2 = GitDiffCache.getInstance(getDiffFn2);

@@ -3,6 +3,7 @@
 *
 */

import { describe, expect, test } from "vitest";
import {
  AutocompleteCodeSnippet,
  AutocompleteDiffSnippet,

@@ -1,3 +1,4 @@
import { describe, expect, it } from "vitest";
import {
  processTestCase,
  type CompletionTestCase,

@@ -1,3 +1,4 @@
import { describe, expect, it } from "vitest";
import { processTestCase } from "./completionTestUtils";
import { processSingleLineCompletion } from "./processSingleLineCompletion";

@@ -87,38 +87,40 @@ export class ProfileLifecycleManager {
    }

    // Set pending config promise
    this.pendingConfigPromise = new Promise(async (resolve, reject) => {
      let result: ConfigResult<ContinueConfig>;
      // This try catch is expected to catch high-level errors that aren't block-specific
      // Like invalid json, invalid yaml, file read errors, etc.
      // NOT block-specific loading errors
      try {
        result = await this.profileLoader.doLoadConfig();
      } catch (e) {
        const message =
          e instanceof Error
            ? `${e.message}\n${e.stack ? e.stack : ""}`
            : "Error loading config";
        result = {
          errors: [
            {
              fatal: true,
              message,
            },
          ],
          config: undefined,
          configLoadInterrupted: true,
        };
      }
    this.pendingConfigPromise = new Promise((resolve) => {
      void (async () => {
        let result: ConfigResult<ContinueConfig>;
        // This try catch is expected to catch high-level errors that aren't block-specific
        // Like invalid json, invalid yaml, file read errors, etc.
        // NOT block-specific loading errors
        try {
          result = await this.profileLoader.doLoadConfig();
        } catch (e) {
          const message =
            e instanceof Error
              ? `${e.message}\n${e.stack ? e.stack : ""}`
              : "Error loading config";
          result = {
            errors: [
              {
                fatal: true,
                message,
              },
            ],
            config: undefined,
            configLoadInterrupted: true,
          };
        }

      if (result.config) {
        // Add registered context providers
        result.config.contextProviders = (
          result.config.contextProviders ?? []
        ).concat(additionalContextProviders);
      }
        if (result.config) {
          // Add registered context providers
          result.config.contextProviders = (
            result.config.contextProviders ?? []
          ).concat(additionalContextProviders);
        }

      resolve(result);
        resolve(result);
      })();
    });

    // Wait for the config promise to resolve

@@ -0,0 +1,241 @@
import { walkDirCache } from "../indexing/walkDir";
import { testIde } from "../test/fixtures";
import { addToTestDir, setUpTestDir, tearDownTestDir } from "../test/testDir";
import {
  getAllDotContinueDefinitionFiles,
  LoadAssistantFilesOptions,
} from "./loadLocalAssistants";
describe("getAllDotContinueDefinitionFiles with fileExtType option", () => {
  beforeEach(() => {
    setUpTestDir();
    walkDirCache.invalidate();

    // Add test files to the test directory
    addToTestDir([
      ".continue/assistants/",
      [".continue/assistants/assistant1.yaml", "yaml content 1"],
      [".continue/assistants/assistant2.yml", "yaml content 2"],
      [".continue/assistants/assistant3.md", "markdown content 1"],
      [".continue/assistants/assistant4.txt", "txt content"],
    ]);
  });

  afterEach(() => {
    tearDownTestDir();
    walkDirCache.invalidate();
  });

  it("should return only YAML files when fileExtType is 'yaml'", async () => {
    const options: LoadAssistantFilesOptions = {
      includeGlobal: false, // Only test workspace for simplicity
      includeWorkspace: true,
      fileExtType: "yaml",
    };

    const result = await getAllDotContinueDefinitionFiles(
      testIde,
      options,
      "assistants",
    );
    expect(result).toHaveLength(2);
    expect(result.map((f) => f.path.split("/").pop())).toEqual(
      expect.arrayContaining(["assistant1.yaml", "assistant2.yml"]),
    );
    expect(result.map((f) => f.path.split("/").pop())).not.toContain(
      "assistant3.md",
    );
  });

  it("should return only Markdown files when fileExtType is 'markdown'", async () => {
    const options: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,
      fileExtType: "markdown",
    };

    const result = await getAllDotContinueDefinitionFiles(
      testIde,
      options,
      "assistants",
    );
    expect(result).toHaveLength(1);
    expect(result.map((f) => f.path.split("/").pop())).toEqual([
      "assistant3.md",
    ]);
    expect(result.map((f) => f.path.split("/").pop())).not.toContain(
      "assistant1.yaml",
    );
    expect(result.map((f) => f.path.split("/").pop())).not.toContain(
      "assistant2.yml",
    );
  });

  it("should return all supported files when fileExtType is not specified", async () => {
    const options: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,
      // fileExtType not specified
    };

    const result = await getAllDotContinueDefinitionFiles(
      testIde,
      options,
      "assistants",
    );
    expect(result).toHaveLength(3);
    expect(result.map((f) => f.path.split("/").pop())).toEqual(
      expect.arrayContaining([
        "assistant1.yaml",
        "assistant2.yml",
        "assistant3.md",
      ]),
    );
    // Should not include .txt files
    expect(result.map((f) => f.path.split("/").pop())).not.toContain(
      "assistant4.txt",
    );
  });

  it("should respect includeWorkspace option with fileExtType", async () => {
    // Test with includeWorkspace: false
    const workspaceOffOptions: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: false,
      fileExtType: "yaml",
    };

    const noWorkspaceResult = await getAllDotContinueDefinitionFiles(
      testIde,
      workspaceOffOptions,
      "assistants",
    );
    expect(noWorkspaceResult).toHaveLength(0);

    // Test with includeWorkspace: true
    const workspaceOnOptions: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,
      fileExtType: "yaml",
    };

    const workspaceResult = await getAllDotContinueDefinitionFiles(
      testIde,
      workspaceOnOptions,
      "assistants",
    );
    expect(workspaceResult).toHaveLength(2);
    expect(workspaceResult.map((f) => f.path.split("/").pop())).toEqual(
      expect.arrayContaining(["assistant1.yaml", "assistant2.yml"]),
    );
  });

  it("should return empty array when no files match the specified extension type", async () => {
    // Create a test directory with only non-matching files
    tearDownTestDir();
    walkDirCache.invalidate();
    setUpTestDir();
    addToTestDir([
      ".continue/assistants/",
      [".continue/assistants/nonmatch1.txt", "txt content"],
      [".continue/assistants/nonmatch2.json", "json content"],
    ]);

    const options: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,

      fileExtType: "yaml",
    };

    const result = await getAllDotContinueDefinitionFiles(
      testIde,
      options,
      "assistants",
    );
    expect(result).toHaveLength(0);
  });

  it("should handle directories that don't exist", async () => {
    // Create a clean test directory without the assistants folder
    tearDownTestDir();
    setUpTestDir();

    const options: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,
      fileExtType: "yaml",
    };

    const result = await getAllDotContinueDefinitionFiles(
      testIde,
      options,
      "assistants",
    );
    expect(result).toHaveLength(0);
  });

  it("should return correct file content", async () => {
    const options: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,
      fileExtType: "yaml",
    };

    const result = await getAllDotContinueDefinitionFiles(
      testIde,
      options,
      "assistants",
    );
    expect(result).toHaveLength(2);
    const yamlFile = result.find((f) => f.path.includes("assistant1.yaml"));
    expect(yamlFile?.content).toBe("yaml content 1");
  });

  it("should filter by file extension case sensitively", async () => {
    // Add files with uppercase extensions
    addToTestDir([
      [".continue/assistants/assistant5.YAML", "uppercase yaml"],
      [".continue/assistants/assistant6.YML", "uppercase yml"],
      [".continue/assistants/assistant7.MD", "uppercase md"],
    ]);

    const yamlOptions: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,
      fileExtType: "yaml",
    };

    const yamlResult = await getAllDotContinueDefinitionFiles(
      testIde,
      yamlOptions,
      "assistants",
    );
    // Should only get lowercase extensions (current implementation)
    expect(yamlResult).toHaveLength(2);
    expect(yamlResult.map((f) => f.path.split("/").pop())).toEqual(
      expect.arrayContaining(["assistant1.yaml", "assistant2.yml"]),
    );
    expect(yamlResult.map((f) => f.path.split("/").pop())).not.toContain(
      "assistant5.YAML",
    );

    const markdownOptions: LoadAssistantFilesOptions = {
      includeGlobal: false,
      includeWorkspace: true,
      fileExtType: "markdown",
    };

    const markdownResult = await getAllDotContinueDefinitionFiles(
      testIde,
      markdownOptions,
      "assistants",
    );
    expect(markdownResult).toHaveLength(1);
    expect(markdownResult.map((f) => f.path.split("/").pop())).toEqual([
      "assistant3.md",
    ]);
    expect(markdownResult.map((f) => f.path.split("/").pop())).not.toContain(
      "assistant7.MD",
    );
  });
});

@@ -1,4 +1,5 @@
import ignore from "ignore";
import * as URI from "uri-js";
import { IDE } from "..";
import {
  DEFAULT_IGNORE_DIRS,

@@ -12,18 +13,19 @@ import { joinPathsToUri } from "../util/uri";
export const ASSISTANTS = "assistants";
export const ASSISTANTS_FOLDER = `.continue/${ASSISTANTS}`;

export function isLocalAssistantFile(uri: string): boolean {
export function isLocalDefinitionFile(uri: string): boolean {
  if (!uri.endsWith(".yaml") && !uri.endsWith(".yml") && !uri.endsWith(".md")) {
    return false;
  }

  const normalizedUri = uri.replace(/\\/g, "/");
  const normalizedUri = URI.normalize(uri);
  return normalizedUri.includes(`/${ASSISTANTS_FOLDER}/`);
}

async function getDefinitionFilesInDir(
  ide: IDE,
  dir: string,
  fileExtType?: "yaml" | "markdown",
): Promise<{ path: string; content: string }[]> {
  try {
    const exists = await ide.fileExists(dir);

@@ -40,9 +42,19 @@ async function getDefinitionFilesInDir(
      overrideDefaultIgnores,
      source: "get assistant files",
    });
    const assistantFilePaths = uris.filter(
      (p) => p.endsWith(".yaml") || p.endsWith(".yml") || p.endsWith(".md"),
    );
    let assistantFilePaths: string[];
    if (fileExtType === "yaml") {
      assistantFilePaths = uris.filter(
        (p) => p.endsWith(".yaml") || p.endsWith(".yml"),
      );
    } else if (fileExtType === "markdown") {
      assistantFilePaths = uris.filter((p) => p.endsWith(".md"));
    } else {
      assistantFilePaths = uris.filter(
        (p) => p.endsWith(".yaml") || p.endsWith(".yml") || p.endsWith(".md"),
      );
    }

    const results = assistantFilePaths.map(async (uri) => {
      const content = await ide.readFile(uri); // make a try catch
      return { path: uri, content };

@@ -57,6 +69,7 @@ async function getDefinitionFilesInDir(
export interface LoadAssistantFilesOptions {
  includeGlobal: boolean;
  includeWorkspace: boolean;
  fileExtType?: "yaml" | "markdown";
}

export function getDotContinueSubDirs(

@@ -84,7 +97,7 @@ export function getDotContinueSubDirs(

/**
 * This method searches in both ~/.continue and workspace .continue
 * for all YAML files in the specified subdirctory, for example .continue/assistants or .continue/prompts
 * for all YAML/Markdown files in the specified subdirectory, for example .continue/assistants or .continue/prompts
 */
export async function getAllDotContinueDefinitionFiles(
  ide: IDE,

@@ -101,15 +114,14 @@ export async function getAllDotContinueDefinitionFiles(
    subDirName,
  );

  // Get all assistant files from the directories
  const assistantFiles = (
    await Promise.all(fullDirs.map((dir) => getDefinitionFilesInDir(ide, dir)))
  // Get all definition files from the directories
  const definitionFiles = (
    await Promise.all(
      fullDirs.map((dir) =>
        getDefinitionFilesInDir(ide, dir, options.fileExtType),
      ),
    )
  ).flat();

  return await Promise.all(
    assistantFiles.map(async (file) => {
      const content = await ide.readFile(file.path);
      return { path: file.path, content };
    }),
  );
  return definitionFiles;
}

@@ -1,3 +1 @@
export * from "./createMarkdownRule";
export * from "./loadMarkdownRules";
export * from "./parseMarkdownRule";

@@ -1,9 +1,11 @@
import { ConfigValidationError } from "@continuedev/config-yaml";
import {
  ConfigValidationError,
  markdownToRule,
} from "@continuedev/config-yaml";
import { IDE, RuleWithSource } from "../..";
import { walkDirs } from "../../indexing/walkDir";
import { RULES_MARKDOWN_FILENAME } from "../../llm/rules/constants";
import { getUriPathBasename } from "../../util/uri";
import { convertMarkdownRuleToContinueRule } from "./parseMarkdownRule";

/**
 * Loads rules from rules.md files colocated in the codebase

@@ -29,9 +31,9 @@ export async function loadCodebaseRules(ide: IDE): Promise<{
  for (const filePath of rulesMdFiles) {
    try {
      const content = await ide.readFile(filePath);
      const rule = convertMarkdownRuleToContinueRule(filePath, content);
      const rule = markdownToRule(content, { uriType: "file", filePath });

      rules.push(rule);
      rules.push({ ...rule, source: "rules-block" });
    } catch (e) {
      errors.push({
        fatal: false,

@@ -1,16 +1,16 @@
import { markdownToRule } from "@continuedev/config-yaml";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { IDE } from "../..";
import { walkDirs } from "../../indexing/walkDir";
import { loadCodebaseRules } from "./loadCodebaseRules";
import { convertMarkdownRuleToContinueRule } from "./parseMarkdownRule";

// Mock dependencies
vi.mock("../../indexing/walkDir", () => ({
  walkDirs: vi.fn(),
}));

vi.mock("./parseMarkdownRule", () => ({
  convertMarkdownRuleToContinueRule: vi.fn(),
vi.mock("@continuedev/config-yaml", () => ({
  markdownToRule: vi.fn(),
}));

describe("loadCodebaseRules", () => {

@@ -82,10 +82,10 @@ describe("loadCodebaseRules", () => {
      return Promise.resolve(mockRuleContent[path] || "");
    });

    // Mock convertMarkdownRuleToContinueRule to return converted rules
    (convertMarkdownRuleToContinueRule as any).mockImplementation(
      (path: string, content: string) => {
        return mockConvertedRules[path];
    // Mock markdownToRule to return converted rules
    (markdownToRule as any).mockImplementation(
      (content: string, options: any) => {
        return mockConvertedRules[options.filePath];
      },
    );
  });

@@ -107,7 +107,7 @@ describe("loadCodebaseRules", () => {
    expect(mockIde.readFile).toHaveBeenCalledWith(".continue/rules.md");

    // Should convert all rules
    expect(convertMarkdownRuleToContinueRule).toHaveBeenCalledTimes(4);
    expect(markdownToRule).toHaveBeenCalledTimes(4);

    // Should return all rules
    expect(rules).toHaveLength(4);

@@ -1,7 +1,9 @@
import { ConfigValidationError } from "@continuedev/config-yaml";
import {
  ConfigValidationError,
  markdownToRule,
} from "@continuedev/config-yaml";
import { IDE, RuleWithSource } from "../..";
import { getAllDotContinueDefinitionFiles } from "../loadLocalAssistants";
import { convertMarkdownRuleToContinueRule } from "./parseMarkdownRule";

/**
 * Loads rules from markdown files in the .continue/rules directory

@@ -17,7 +19,7 @@ export async function loadMarkdownRules(ide: IDE): Promise<{
  // Get all .md files from .continue/rules
  const markdownFiles = await getAllDotContinueDefinitionFiles(
    ide,
    { includeGlobal: true, includeWorkspace: true },
    { includeGlobal: true, includeWorkspace: true, fileExtType: "markdown" },
    "rules",
  );

@@ -27,8 +29,11 @@ export async function loadMarkdownRules(ide: IDE): Promise<{
  // Process each markdown file
  for (const file of mdFiles) {
    try {
      const rule = convertMarkdownRuleToContinueRule(file.path, file.content);
      rules.push(rule);
      const rule = markdownToRule(file.content, {
        uriType: "file",
        filePath: file.path,
      });
      rules.push({ ...rule, source: "rules-block" });
    } catch (e) {
      errors.push({
        fatal: false,

@@ -1,192 +0,0 @@
import {
  convertMarkdownRuleToContinueRule,
  parseMarkdownRule,
} from "./parseMarkdownRule";

describe("parseMarkdownRule", () => {
  it("should correctly parse markdown with YAML frontmatter", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

# Test Rule

This is a test rule.`;

    const result = parseMarkdownRule(content);
    expect(result.frontmatter).toEqual({ globs: "**/test/**/*.kt" });
    expect(result.markdown).toBe("# Test Rule\n\nThis is a test rule.");
  });

  it("should handle missing frontmatter", () => {
    const content = `# Test Rule

This is a test rule without frontmatter.`;

    const result = parseMarkdownRule(content);
    expect(result.frontmatter).toEqual({});
    expect(result.markdown).toBe(content);
  });

  it("should handle empty frontmatter", () => {
    const content = `---
---

# Test Rule

This is a test rule with empty frontmatter.`;

    const result = parseMarkdownRule(content);

    // Log exact strings for debugging
    console.log("Actual:", JSON.stringify(result.markdown));
    console.log(
      "Expected:",
      JSON.stringify(
        "# Test Rule\n\nThis is a test rule with empty frontmatter.",
      ),
    );

    expect(result.frontmatter).toEqual({});
    expect(result.markdown).toBe(
      "# Test Rule\n\nThis is a test rule with empty frontmatter.",
    );
  });

  it("should handle frontmatter with whitespace", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

# Test Rule

This is a test rule.`;

    const result = parseMarkdownRule(content);
    expect(result.frontmatter).toEqual({ globs: "**/test/**/*.kt" });
    expect(result.markdown).toBe("# Test Rule\n\nThis is a test rule.");
  });

  it("should handle Windows line endings (CRLF)", () => {
    // Using \r\n for CRLF line endings
    const content = `---\r
globs: "**/test/**/*.kt"\r
---\r
\r
# Test Rule\r
\r
This is a test rule.`;

    const result = parseMarkdownRule(content);
    expect(result.frontmatter).toEqual({ globs: "**/test/**/*.kt" });
    // The result should be normalized to \n
    expect(result.markdown).toBe("# Test Rule\n\nThis is a test rule.");
  });

  it("should handle malformed frontmatter", () => {
    const content = `---
globs: - "**/test/**/*.kt"
invalid: yaml: content
---

# Test Rule

This is a test rule.`;

    // Should treat as only markdown when frontmatter is malformed
    const result = parseMarkdownRule(content);
    expect(result.frontmatter).toEqual({});
    expect(result.markdown).toBe(content);
  });
});

describe("convertMarkdownRuleToContinueRule", () => {
  it("should convert markdown with frontmatter to a rule", () => {
    const content = `---
globs: "**/test/**/*.kt"
name: Custom Name
---

# Test Rule

This is a test rule.`;

    const result = convertMarkdownRuleToContinueRule(
      "/path/to/rule.md",
      content,
    );
    expect(result.rule).toBe("# Test Rule\n\nThis is a test rule.");
    expect(result.globs).toBe("**/test/**/*.kt");
    expect(result.name).toBe("Custom Name");
    expect(result.source).toBe("rules-block");
    expect(result.ruleFile).toBe("/path/to/rule.md");
  });

  it("should use the first heading as name if not in frontmatter", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

# Test Rule Title

This is a test rule.`;

    const result = convertMarkdownRuleToContinueRule(
      "/path/to/rule.md",
      content,
    );
    expect(result.name).toBe("Test Rule Title");
  });

  it("should use filename as name if no heading or frontmatter name", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

This is a test rule without a heading.`;

    const result = convertMarkdownRuleToContinueRule(
      "/path/to/custom-rule.md",
      content,
    );
    expect(result.name).toBe("custom-rule");
  });

  it("should include description from frontmatter", () => {
    const content = `---
globs: "**/test/**/*.kt"
name: Test Rule
description: This is a rule description from frontmatter
---

# Test Rule

This is the content of the rule.`;

    const result = convertMarkdownRuleToContinueRule(
      "/path/to/rule.md",
      content,
    );
    expect(result.description).toBe(
      "This is a rule description from frontmatter",
    );
  });

  it("should include `alwaysApply` from frontmatter", () => {
    const content = `---
globs: "**/test/**/*.kt"
name: Test Rule
alwaysApply: false
---

# Test Rule

This is a rule with alwaysApply explicitly set to false.`;

    const result = convertMarkdownRuleToContinueRule(
      "/path/to/rule.md",
      content,
    );
    expect(result.alwaysApply).toBe(false);
  });
});

@@ -0,0 +1,21 @@
import {
  RULE_FILE_EXTENSION,
  sanitizeRuleName,
} from "@continuedev/config-yaml";
import { joinPathsToUri } from "../../util/uri";

/**
 * Creates the file path for a rule in the workspace .continue/rules directory
 */
export function createRuleFilePath(
  workspaceDir: string,
  ruleName: string,
): string {
  const safeRuleName = sanitizeRuleName(ruleName);
  return joinPathsToUri(
    workspaceDir,
    ".continue",
    "rules",
    `${safeRuleName}.${RULE_FILE_EXTENSION}`,
  );
}

@@ -0,0 +1,19 @@
import { describe, expect, it } from "vitest";
import { createRuleFilePath } from "./utils";

describe("createRuleFilePath", () => {
  it("should create correct rule file path", () => {
    const result = createRuleFilePath("/workspace", "My Test Rule");
    expect(result).toBe("/workspace/.continue/rules/my-test-rule.md");
  });

  it("should handle special characters in rule name", () => {
    const result = createRuleFilePath("/home/user", "Rule with @#$% chars");
    expect(result).toBe("/home/user/.continue/rules/rule-with-chars.md");
  });

  it("should handle edge case rule names", () => {
    const result = createRuleFilePath("/test", " Multiple Spaces ");
    expect(result).toBe("/test/.continue/rules/multiple-spaces.md");
  });
});

@@ -211,6 +211,6 @@ export function migrateJsonSharedConfig(filepath: string, ide: IDE): void {
      new GlobalContext().updateSharedConfig(shareConfigUpdates);
    }
  } catch (e) {
    throw new Error(`Migration: Failed to parse config.json: ${e}`);
    console.error(`Migration: Failed to parse config.json: ${e}`);
  }
}

@@ -1,12 +1,10 @@
import { BlockType } from "@continuedev/config-yaml";
import { BlockType, RULE_FILE_EXTENSION } from "@continuedev/config-yaml";
import { describe, expect, test } from "@jest/globals";
import { RULE_FILE_EXTENSION } from "../markdown";
import { findAvailableFilename, getFileContent } from "./workspaceBlocks";

describe("getFileContent", () => {
  test("returns markdown content for rules block type", () => {
    const result = getFileContent("rules");
    expect(result).toContain("# New Rule");
    expect(result).toContain("Your rule content");
    expect(result).toContain("A description of your rule");
  });

@@ -1,8 +1,12 @@
import { BlockType, ConfigYaml } from "@continuedev/config-yaml";
import {
  BlockType,
  ConfigYaml,
  createRuleMarkdown,
  RULE_FILE_EXTENSION,
} from "@continuedev/config-yaml";
import * as YAML from "yaml";
import { IDE } from "../..";
import { joinPathsToUri } from "../../util/uri";
import { RULE_FILE_EXTENSION, createRuleMarkdown } from "../markdown";

const BLOCK_TYPE_CONFIG: Record<
  BlockType,

@@ -123,7 +123,6 @@ describe("LocalPlatformClient", () => {
    expect(
      (resolvedFQSNs[0] as SecretResult & { value: unknown })?.value,
    ).toBe(secretValue);
    console.log("debug1 resolved fqsn", resolvedFQSNs);
  });
});

@@ -97,7 +97,7 @@ async function loadConfigYaml(options: {
  for (const blockType of BLOCK_TYPES) {
    const localBlocks = await getAllDotContinueDefinitionFiles(
      ide,
      { includeGlobal: true, includeWorkspace: true },
      { includeGlobal: true, includeWorkspace: true, fileExtType: "yaml" },
      blockType,
    );
    allLocalBlocks.push(

@@ -134,6 +134,7 @@ async function loadConfigYaml(options: {
        rootPath,
      }),
      {
        renderSecrets: true,
        currentUserSlug: "",
        onPremProxyUrl: null,
        orgScopeId,

@@ -142,9 +143,7 @@ async function loadConfigYaml(options: {
          controlPlaneClient,
          ide,
        ),
        renderSecrets: true,
        injectBlocks: allLocalBlocks,
        asConfigResult: true,
      },
    );
    config = unrollResult.config;

@@ -153,13 +152,8 @@ async function loadConfigYaml(options: {
    }
  }

  if (config) {
    isAssistantUnrolledNonNullable(config)
      ? errors.push(...validateConfigYaml(config))
      : errors.push({
          fatal: true,
          message: "Assistant includes blocks that don't exist",
        });
  if (config && isAssistantUnrolledNonNullable(config)) {
    errors.push(...validateConfigYaml(config));
  }

  if (errors?.some((error) => error.fatal)) {

@@ -223,7 +217,8 @@ async function configYamlToContinueConfig(options: {
    config: continueConfig,
    errors: [
      {
        message: "Found missing blocks in config.yaml",
        message:
          "Failed to load config due to missing blocks, see which blocks are missing below",
        fatal: true,
      },
    ],

core/core.ts

@@ -49,7 +49,7 @@ import {

import { ConfigYaml } from "@continuedev/config-yaml";
import { getDiffFn, GitDiffCache } from "./autocomplete/snippets/gitDiffCache";
import { isLocalAssistantFile } from "./config/loadLocalAssistants";
import { isLocalDefinitionFile } from "./config/loadLocalAssistants";
import {
  setupLocalConfig,
  setupProviderConfig,

@@ -145,8 +145,8 @@ export class Core {
      this.messenger,
    );

    MCPManagerSingleton.getInstance().onConnectionsRefreshed = async () => {
      await this.configHandler.reloadConfig();
    MCPManagerSingleton.getInstance().onConnectionsRefreshed = () => {
      void this.configHandler.reloadConfig();
    };

    this.codeBaseIndexer = new CodebaseIndexer(

@@ -156,24 +156,26 @@ export class Core {
      this.globalContext.get("indexingPaused"),
    );

    this.configHandler.onConfigUpdate(async (result) => {
      const serializedResult = await this.configHandler.getSerializedConfig();
      this.messenger.send("configUpdate", {
        result: serializedResult,
        profileId:
          this.configHandler.currentProfile?.profileDescription.id || null,
        organizations: this.configHandler.getSerializedOrgs(),
        selectedOrgId: this.configHandler.currentOrg.id,
      });

      // update additional submenu context providers registered via VSCode API
      const additionalProviders =
        this.configHandler.getAdditionalSubmenuContextProviders();
      if (additionalProviders.length > 0) {
        this.messenger.send("refreshSubmenuItems", {
          providers: additionalProviders,
    this.configHandler.onConfigUpdate((result) => {
      void (async () => {
        const serializedResult = await this.configHandler.getSerializedConfig();
        this.messenger.send("configUpdate", {
          result: serializedResult,
          profileId:
            this.configHandler.currentProfile?.profileDescription.id || null,
          organizations: this.configHandler.getSerializedOrgs(),
          selectedOrgId: this.configHandler.currentOrg.id,
        });
      }

        // update additional submenu context providers registered via VSCode API
        const additionalProviders =
          this.configHandler.getAdditionalSubmenuContextProviders();
        if (additionalProviders.length > 0) {
          this.messenger.send("refreshSubmenuItems", {
            providers: additionalProviders,
          });
        }
      })();
    });

    // Dev Data Logger

@@ -606,7 +608,7 @@ export class Core {
    // If it's a local assistant being created, we want to reload all assistants so it shows up in the list
    let localAssistantCreated = false;
    for (const uri of data.uris) {
      if (isLocalAssistantFile(uri)) {
      if (isLocalDefinitionFile(uri)) {
        localAssistantCreated = true;
      }
    }

@@ -97,8 +97,8 @@ export class LanceDbIndex implements CodebaseIndex {
      contents TEXT NOT NULL
    )`);

    await new Promise((resolve) =>
      migrate(
    await new Promise((resolve) => {
      void migrate(
        "lancedb_sqlite_artifact_id_column",
        async () => {
          try {

@@ -118,8 +118,8 @@ export class LanceDbIndex implements CodebaseIndex {
          }
        },
        () => resolve(undefined),
      ),
    );
      );
    });
  }

  private async computeRows(items: PathAndCacheKey[]): Promise<LanceDbRow[]> {

|
@ -50,21 +50,23 @@ export async function* chunkDocument({
|
|||
maxChunkSize,
|
||||
)) {
|
||||
chunkPromises.push(
|
||||
new Promise(async (resolve) => {
|
||||
if ((await countTokensAsync(chunkWithoutId.content)) > maxChunkSize) {
|
||||
// console.debug(
|
||||
// `Chunk with more than ${maxChunkSize} tokens constructed: `,
|
||||
// filepath,
|
||||
// countTokens(chunkWithoutId.content),
|
||||
// );
|
||||
return resolve(undefined);
|
||||
}
|
||||
resolve({
|
||||
...chunkWithoutId,
|
||||
digest,
|
||||
index,
|
||||
filepath,
|
||||
});
|
||||
new Promise((resolve) => {
|
||||
void (async () => {
|
||||
if ((await countTokensAsync(chunkWithoutId.content)) > maxChunkSize) {
|
||||
// console.debug(
|
||||
// `Chunk with more than ${maxChunkSize} tokens constructed: `,
|
||||
// filepath,
|
||||
// countTokens(chunkWithoutId.content),
|
||||
// );
|
||||
return resolve(undefined);
|
||||
}
|
||||
resolve({
|
||||
...chunkWithoutId,
|
||||
digest,
|
||||
index,
|
||||
filepath,
|
||||
});
|
||||
})();
|
||||
}),
|
||||
);
|
||||
index++;
|
||||
|
|
|
@ -186,7 +186,9 @@ export default class DocsService {
|
|||
private async init(configHandler: ConfigHandler) {
|
||||
const result = await configHandler.loadConfig();
|
||||
await this.handleConfigUpdate(result);
|
||||
configHandler.onConfigUpdate(this.handleConfigUpdate.bind(this));
|
||||
configHandler.onConfigUpdate(
|
||||
this.handleConfigUpdate.bind(this) as (arg: any) => void,
|
||||
);
|
||||
}
|
||||
|
||||
readonly statuses: Map<string, IndexingStatus> = new Map();
|
||||
|
|
|
@ -6,8 +6,8 @@ import { editConfigFile, migrate } from "../../util/paths.js";
|
|||
import DocsService, { SqliteDocsRow } from "./DocsService.js";
|
||||
|
||||
export async function runLanceMigrations(table: Table) {
|
||||
await new Promise((resolve) =>
|
||||
migrate(
|
||||
await new Promise((resolve) => {
|
||||
void migrate(
|
||||
"rename_baseurl_column_for_lance_docs",
|
||||
async () => {
|
||||
try {
|
||||
|
@ -21,8 +21,8 @@ export async function runLanceMigrations(table: Table) {
|
|||
}
|
||||
},
|
||||
() => resolve(undefined),
|
||||
),
|
||||
);
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
export async function runSqliteMigrations(db: Database) {
|
||||
|
|
|
@ -29,7 +29,5 @@ export default {
|
|||
globalSetup: "<rootDir>/test/jest.global-setup.ts",
|
||||
setupFilesAfterEnv: ["<rootDir>/test/jest.setup-after-env.js"],
|
||||
maxWorkers: 1, // equivalent to CLI --runInBand
|
||||
modulePathIgnorePatterns: [
|
||||
"<rootDir>/config/yaml/LocalPlatformClient.test.ts",
|
||||
],
|
||||
testMatch: ["**/*.test.ts"],
|
||||
};
|
||||
|
|
|
@ -52,6 +52,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [
|
|||
"msty",
|
||||
"anthropic",
|
||||
"bedrock",
|
||||
"cohere",
|
||||
"sagemaker",
|
||||
"continue-proxy",
|
||||
"mistral",
|
||||
|
@ -65,6 +66,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [
|
|||
const PROVIDER_SUPPORTS_IMAGES: string[] = [
|
||||
"openai",
|
||||
"ollama",
|
||||
"cohere",
|
||||
"gemini",
|
||||
"msty",
|
||||
"anthropic",
|
||||
|
@ -89,6 +91,8 @@ const MODEL_SUPPORTS_IMAGES: string[] = [
|
|||
"gpt-4o-mini",
|
||||
"gpt-4-vision",
|
||||
"claude-3",
|
||||
"c4ai-aya-vision-8b",
|
||||
"c4ai-aya-vision-32b",
|
||||
"gemini-ultra",
|
||||
"gemini-1.5-pro",
|
||||
"gemini-1.5-flash",
|
||||
|
@ -140,6 +144,7 @@ function modelSupportsImages(
|
|||
const PARALLEL_PROVIDERS: string[] = [
|
||||
"anthropic",
|
||||
"bedrock",
|
||||
"cohere",
|
||||
"sagemaker",
|
||||
"deepinfra",
|
||||
"gemini",
|
||||
|
@ -176,6 +181,7 @@ function autodetectTemplateType(model: string): TemplateType | undefined {
|
|||
if (
|
||||
lower.includes("gpt") ||
|
||||
lower.includes("command") ||
|
||||
lower.includes("aya") ||
|
||||
lower.includes("chat-bison") ||
|
||||
lower.includes("pplx") ||
|
||||
lower.includes("gemini") ||
|
||||
|
|
|
@ -1068,9 +1068,17 @@ export abstract class BaseLLM implements ILLM {
|
|||
documents: chunks.map((chunk) => chunk.content),
|
||||
});
|
||||
|
||||
// Put them in the order they were given
|
||||
const sortedResults = results.data.sort((a, b) => a.index - b.index);
|
||||
return sortedResults.map((result) => result.relevance_score);
|
||||
// Standard OpenAI format
|
||||
if (results.data && Array.isArray(results.data)) {
|
||||
return results.data
|
||||
.sort((a, b) => a.index - b.index)
|
||||
.map((result) => result.relevance_score);
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`Unexpected rerank response format from ${this.providerName}. ` +
|
||||
`Expected 'data' array but got: ${JSON.stringify(Object.keys(results))}`,
|
||||
);
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
|
|
|
@ -161,12 +161,7 @@ function testLLM(
|
|||
group: "Hello",
|
||||
},
|
||||
],
|
||||
toolChoice: {
|
||||
type: "function",
|
||||
function: {
|
||||
name: "say_hello",
|
||||
},
|
||||
},
|
||||
toolChoice: { type: "function", function: { name: "say_hello" } },
|
||||
},
|
||||
)) {
|
||||
const typedChunk = chunk as AssistantChatMessage;
|
||||
|
@ -211,10 +206,7 @@ describe("LLM", () => {
|
|||
model: "claude-3-5-sonnet-latest",
|
||||
apiKey: process.env.ANTHROPIC_API_KEY,
|
||||
}),
|
||||
{
|
||||
skip: false,
|
||||
testToolCall: true,
|
||||
},
|
||||
{ skip: false, testToolCall: true },
|
||||
);
|
||||
testLLM(new OpenAI({ apiKey: process.env.OPENAI_API_KEY, model: "gpt-4o" }), {
|
||||
skip: false,
|
||||
|
@ -240,12 +232,7 @@ describe("LLM", () => {
|
|||
apiKey: process.env.MISTRAL_API_KEY,
|
||||
model: "codestral-latest",
|
||||
}),
|
||||
{
|
||||
testFim: true,
|
||||
skip: false,
|
||||
testToolCall: true,
|
||||
timeout: 60000,
|
||||
},
|
||||
{ testFim: true, skip: false, testToolCall: true, timeout: 60000 },
|
||||
);
|
||||
testLLM(
|
||||
new Azure({
|
||||
|
@ -260,11 +247,10 @@ describe("LLM", () => {
|
|||
);
|
||||
testLLM(
|
||||
new Azure({
|
||||
apiKey: process.env.AZURE_FOUNDRY_API_KEY,
|
||||
model: "codestral-latest",
|
||||
apiBase:
|
||||
"https://codestral-2501-continue-testing.eastus.models.ai.azure.com",
|
||||
apiType: "azure-foundry",
|
||||
apiKey: process.env.AZURE_FOUNDRY_CODESTRAL_API_KEY,
|
||||
model: "Codestral-2501",
|
||||
apiBase: "https://continue-foundry-resource.services.ai.azure.com",
|
||||
env: { apiType: "azure-foundry", apiVersion: "2024-05-01-preview" },
|
||||
}),
|
||||
{ testFim: false, skip: false, timeout: 20000 },
|
||||
);
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
import { streamJSON } from "@continuedev/fetch";
|
||||
import { streamSse } from "@continuedev/fetch";
|
||||
import {
|
||||
ChatMessage,
|
||||
Chunk,
|
||||
CompletionOptions,
|
||||
LLMOptions,
|
||||
MessageContent,
|
||||
} from "../../index.js";
|
||||
import { renderChatMessage, stripImages } from "../../util/messageContent.js";
|
||||
import { BaseLLM } from "../index.js";
|
||||
|
@ -11,21 +12,89 @@ import { BaseLLM } from "../index.js";
|
|||
class Cohere extends BaseLLM {
|
||||
static providerName = "cohere";
|
||||
static defaultOptions: Partial<LLMOptions> = {
|
||||
apiBase: "https://api.cohere.ai/v1",
|
||||
apiBase: "https://api.cohere.ai/v2",
|
||||
maxEmbeddingBatchSize: 96,
|
||||
};
|
||||
static maxStopSequences = 5;
|
||||
|
||||
private _convertMessages(msgs: ChatMessage[]): any[] {
|
||||
const messages = [];
|
||||
let lastToolPlan: MessageContent | undefined;
|
||||
for (const m of msgs) {
|
||||
if (m.role === "system" || !m.content) {
|
||||
if (!m.content) {
|
||||
continue;
|
||||
}
|
||||
messages.push({
|
||||
role: m.role === "assistant" ? "chatbot" : m.role,
|
||||
message: m.content,
|
||||
});
|
||||
switch (m.role) {
|
||||
case "user":
|
||||
if (typeof m.content === "string") {
|
||||
messages.push({
|
||||
role: m.role,
|
||||
content: m.content,
|
||||
});
|
||||
break;
|
||||
}
|
||||
|
||||
messages.push({
|
||||
role: m.role,
|
||||
content: m.content.map((part) => {
|
||||
if (part.type === "imageUrl") {
|
||||
return {
|
||||
type: "image_url",
|
||||
image_url: { url: part.imageUrl.url },
|
||||
};
|
||||
}
|
||||
return part;
|
||||
}),
|
||||
});
|
||||
break;
|
||||
case "thinking":
|
||||
lastToolPlan = m.content;
|
||||
break;
|
||||
case "assistant":
|
||||
if (m.toolCalls) {
|
||||
if (!lastToolPlan) {
|
||||
throw new Error("No tool plan found");
|
||||
}
|
||||
messages.push({
|
||||
role: m.role,
|
||||
tool_calls: m.toolCalls.map((toolCall) => ({
|
||||
id: toolCall.id,
|
||||
type: "function",
|
||||
function: {
|
||||
name: toolCall.function?.name,
|
||||
arguments: toolCall.function?.arguments,
|
||||
},
|
||||
})),
|
||||
// Ideally the tool plan would be in this message, but it is
|
||||
// split into another message (usually the previous one); this one's
|
||||
// content is just a space.
|
||||
// tool_plan: m.content,
|
||||
tool_plan: lastToolPlan,
|
||||
});
|
||||
lastToolPlan = undefined;
|
||||
break;
|
||||
}
|
||||
messages.push({
|
||||
role: m.role,
|
||||
content: m.content,
|
||||
});
|
||||
break;
|
||||
case "system":
|
||||
messages.push({
|
||||
role: m.role,
|
||||
content: stripImages(m.content),
|
||||
});
|
||||
break;
|
||||
case "tool":
|
||||
messages.push({
|
||||
role: m.role,
|
||||
content: m.content,
|
||||
tool_call_id: m.toolCallId,
|
||||
});
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return messages;
|
||||
}
|
||||
|
@ -41,7 +110,14 @@ class Cohere extends BaseLLM {
|
|||
stop_sequences: options.stop?.slice(0, Cohere.maxStopSequences),
|
||||
frequency_penalty: options.frequencyPenalty,
|
||||
presence_penalty: options.presencePenalty,
|
||||
raw_prompting: options.raw,
|
||||
tools: options.tools?.map((tool) => ({
|
||||
type: "function",
|
||||
function: {
|
||||
name: tool.function.name,
|
||||
parameters: tool.function.parameters,
|
||||
description: tool.function.description,
|
||||
},
|
||||
})),
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -67,19 +143,12 @@ class Cohere extends BaseLLM {
|
|||
...this.requestOptions?.headers,
|
||||
};
|
||||
|
||||
let preamble: string | undefined = undefined;
|
||||
const systemMessage = messages.find((m) => m.role === "system")?.content;
|
||||
if (systemMessage) {
|
||||
preamble = stripImages(systemMessage);
|
||||
}
|
||||
const resp = await this.fetch(new URL("chat", this.apiBase), {
|
||||
method: "POST",
|
||||
headers,
|
||||
body: JSON.stringify({
|
||||
...this._convertArgs(options),
|
||||
message: messages.pop()?.content,
|
||||
chat_history: this._convertMessages(messages),
|
||||
preamble,
|
||||
messages: this._convertMessages(messages),
|
||||
}),
|
||||
signal,
|
||||
});
|
||||
|
@ -90,13 +159,97 @@ class Cohere extends BaseLLM {
|
|||
|
||||
if (options.stream === false) {
|
||||
const data = await resp.json();
|
||||
yield { role: "assistant", content: data.text };
|
||||
if (data.message.tool_calls) {
|
||||
yield {
|
||||
// Use the "thinking" role for `tool_plan`, since there is no such
|
||||
// role in the Cohere API at the moment and it is a
|
||||
// "chain-of-thought style reflection".
|
||||
role: "thinking",
|
||||
content: data.message.tool_plan,
|
||||
};
|
||||
yield {
|
||||
role: "assistant",
|
||||
content: "",
|
||||
toolCalls: data.message.tool_calls.map((toolCall: any) => ({
|
||||
id: toolCall.id,
|
||||
type: "function",
|
||||
function: {
|
||||
name: toolCall.function?.name,
|
||||
arguments: toolCall.function?.arguments,
|
||||
},
|
||||
})),
|
||||
};
|
||||
return;
|
||||
}
|
||||
yield { role: "assistant", content: data.message.content[0].text };
|
||||
return;
|
||||
}
|
||||
|
||||
for await (const value of streamJSON(resp)) {
|
||||
if (value.event_type === "text-generation") {
|
||||
yield { role: "assistant", content: value.text };
|
||||
let lastToolUseId: string | undefined;
|
||||
let lastToolUseName: string | undefined;
|
||||
for await (const value of streamSse(resp)) {
|
||||
// https://docs.cohere.com/v2/docs/streaming#stream-events
|
||||
switch (value.type) {
|
||||
// https://docs.cohere.com/v2/docs/streaming#content-delta
|
||||
case "content-delta":
|
||||
yield {
|
||||
role: "assistant",
|
||||
content: value.delta.message.content.text,
|
||||
};
|
||||
break;
|
||||
// https://docs.cohere.com/reference/chat-stream#request.body.messages.assistant.tool_plan
|
||||
case "tool-plan-delta":
|
||||
// Use the "thinking" role for `tool_plan`, since there is no such
|
||||
// role in the Cohere API at the moment and it is a
|
||||
// "chain-of-thought style reflection".
|
||||
yield {
|
||||
role: "thinking",
|
||||
content: value.delta.message.tool_plan,
|
||||
};
|
||||
break;
|
||||
case "tool-call-start":
|
||||
lastToolUseId = value.delta.message.tool_calls.id;
|
||||
lastToolUseName = value.delta.message.tool_calls.function.name;
|
||||
yield {
|
||||
role: "assistant",
|
||||
content: "",
|
||||
toolCalls: [
|
||||
{
|
||||
id: lastToolUseId,
|
||||
type: "function",
|
||||
function: {
|
||||
name: lastToolUseName,
|
||||
arguments: value.delta.message.tool_calls.function.arguments,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
break;
|
||||
case "tool-call-delta":
|
||||
if (!lastToolUseId || !lastToolUseName) {
|
||||
throw new Error("No tool use found");
|
||||
}
|
||||
yield {
|
||||
role: "assistant",
|
||||
content: "",
|
||||
toolCalls: [
|
||||
{
|
||||
id: lastToolUseId,
|
||||
type: "function",
|
||||
function: {
|
||||
name: lastToolUseName,
|
||||
arguments: value.delta.message.tool_calls.function.arguments,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
break;
|
||||
case "tool-call-end":
|
||||
lastToolUseId = undefined;
|
||||
lastToolUseName = undefined;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,7 +1,25 @@
|
|||
import { LLMOptions } from "../../index.js";
|
||||
import { Chunk, LLMOptions } from "../../index.js";
|
||||
|
||||
import OpenAI from "./OpenAI.js";
|
||||
|
||||
// vLLM-specific rerank response types
|
||||
interface VllmRerankItem {
|
||||
index: number;
|
||||
document: {
|
||||
text: string;
|
||||
};
|
||||
relevance_score: number;
|
||||
}
|
||||
|
||||
interface VllmRerankResponse {
|
||||
id: string;
|
||||
model: string;
|
||||
usage: {
|
||||
total_tokens: number;
|
||||
};
|
||||
results: VllmRerankItem[];
|
||||
}
|
||||
|
||||
class Vllm extends OpenAI {
|
||||
static providerName = "vllm";
|
||||
constructor(options: LLMOptions) {
|
||||
|
@ -16,6 +34,28 @@ class Vllm extends OpenAI {
|
|||
return false;
|
||||
}
|
||||
|
||||
async rerank(query: string, chunks: Chunk[]): Promise<number[]> {
|
||||
if (this.useOpenAIAdapterFor.includes("rerank") && this.openaiAdapter) {
|
||||
const results = (await this.openaiAdapter.rerank({
|
||||
model: this.model,
|
||||
query,
|
||||
documents: chunks.map((chunk) => chunk.content),
|
||||
})) as unknown as VllmRerankResponse;
|
||||
|
||||
// vLLM uses 'results' array instead of 'data'
|
||||
if (results.results && Array.isArray(results.results)) {
|
||||
const sortedResults = results.results.sort((a, b) => a.index - b.index);
|
||||
return sortedResults.map((result) => result.relevance_score);
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`vLLM rerank response missing 'results' array. Got: ${JSON.stringify(Object.keys(results))}`,
|
||||
);
|
||||
}
|
||||
|
||||
throw new Error("vLLM rerank requires OpenAI adapter");
|
||||
}
|
||||
|
||||
private _setupCompletionOptions() {
|
||||
this.fetch(this._getEndpoint("models"), {
|
||||
method: "GET",
|
||||
|
|
|
@ -109,6 +109,19 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe("cohere", () => {
|
||||
const supportsFn = PROVIDER_TOOL_SUPPORT["cohere"];
|
||||
|
||||
it("should return true for Command models", () => {
|
||||
expect(supportsFn("command-r")).toBe(true);
|
||||
expect(supportsFn("command-a")).toBe(true);
|
||||
});
|
||||
|
||||
it("should return false for other models", () => {
|
||||
expect(supportsFn("c4ai-aya-expanse-32b")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("gemini", () => {
|
||||
const supportsFn = PROVIDER_TOOL_SUPPORT["gemini"];
|
||||
|
||||
|
@ -221,6 +234,7 @@ describe("PROVIDER_TOOL_SUPPORT", () => {
|
|||
expect(supportsFn("qwen2")).toBe(true);
|
||||
expect(supportsFn("mixtral-8x7b")).toBe(true);
|
||||
expect(supportsFn("command-r")).toBe(true);
|
||||
expect(supportsFn("command-a")).toBe(true);
|
||||
expect(supportsFn("smollm2")).toBe(true);
|
||||
expect(supportsFn("hermes3")).toBe(true);
|
||||
expect(supportsFn("athene-v2")).toBe(true);
|
||||
|
|
|
@ -71,6 +71,9 @@ export const PROVIDER_TOOL_SUPPORT: Record<string, (model: string) => boolean> =
|
|||
|
||||
return false;
|
||||
},
|
||||
cohere: (model) => {
|
||||
return model.toLowerCase().startsWith("command");
|
||||
},
|
||||
gemini: (model) => {
|
||||
// All gemini models support function calling
|
||||
return model.toLowerCase().includes("gemini");
|
||||
|
@ -144,6 +147,7 @@ export const PROVIDER_TOOL_SUPPORT: Record<string, (model: string) => boolean> =
|
|||
"qwen3",
|
||||
"mixtral",
|
||||
"command-r",
|
||||
"command-a",
|
||||
"smollm2",
|
||||
"hermes3",
|
||||
"athene-v2",
|
||||
|
@ -226,6 +230,7 @@ export const PROVIDER_TOOL_SUPPORT: Record<string, (model: string) => boolean> =
|
|||
"qwen/qwen3",
|
||||
"qwen/qwen-",
|
||||
"cohere/command-r",
|
||||
"cohere/command-a",
|
||||
"ai21/jamba-1.6",
|
||||
"mistralai/mistral",
|
||||
"mistralai/ministral",
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
import fs from "fs";
|
||||
import path from "path";
|
||||
|
||||
// Sets up the GLOBAL directory for testing - equivalent to ~/.continue
|
||||
// IMPORTANT: the CONTINUE_GLOBAL_DIR environment variable is used in utils/paths for getting all local paths
|
||||
export default async function () {
|
||||
process.env.CONTINUE_GLOBAL_DIR = path.join(__dirname, ".continue-test");
|
||||
if (fs.existsSync(process.env.CONTINUE_GLOBAL_DIR)) {
|
||||
fs.rmSync(process.env.CONTINUE_GLOBAL_DIR, { recursive: true, force: true });
|
||||
}
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
import { TextDecoder, TextEncoder } from "util";
|
||||
|
||||
import fetch, { Request, Response } from "node-fetch";
|
||||
import { beforeAll } from "vitest";
|
||||
|
||||
beforeAll(() => {
|
||||
// @ts-ignore
|
||||
globalThis.fetch = fetch;
|
||||
// @ts-ignore
|
||||
globalThis.Request = Request;
|
||||
// @ts-ignore
|
||||
globalThis.Response = Response;
|
||||
globalThis.TextEncoder = TextEncoder;
|
||||
// @ts-ignore
|
||||
globalThis.TextDecoder = TextDecoder;
|
||||
});
|
|
@ -1,5 +1,5 @@
|
|||
import { parseMarkdownRule } from "@continuedev/config-yaml";
|
||||
import { jest } from "@jest/globals";
|
||||
import { parseMarkdownRule } from "../../config/markdown";
|
||||
import { createRuleBlockImpl } from "./createRuleBlock";
|
||||
|
||||
const mockIde = {
|
||||
|
@ -40,7 +40,6 @@ test("createRuleBlockImpl should create a rule with glob pattern", async () => {
|
|||
globs: "**/*.{ts,tsx}",
|
||||
});
|
||||
|
||||
expect(markdown).toContain("# TypeScript Rule");
|
||||
expect(markdown).toContain("Use interfaces for object shapes");
|
||||
});
|
||||
|
||||
|
@ -76,7 +75,6 @@ test("createRuleBlockImpl should create a rule with description pattern", async
|
|||
description: "This is a detailed explanation of the rule",
|
||||
});
|
||||
|
||||
expect(markdown).toContain("# Description Test");
|
||||
expect(markdown).toContain("This is the rule content");
|
||||
});
|
||||
|
||||
|
@ -100,7 +98,6 @@ test("createRuleBlockImpl should include both globs and description in frontmatt
|
|||
globs: "**/*.js",
|
||||
});
|
||||
|
||||
expect(markdown).toContain("# Complete Rule");
|
||||
expect(markdown).toContain("Follow this standard");
|
||||
});
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import { createRuleMarkdown } from "@continuedev/config-yaml";
|
||||
import { ToolImpl } from ".";
|
||||
import { RuleWithSource } from "../..";
|
||||
import { createRuleFilePath, createRuleMarkdown } from "../../config/markdown";
|
||||
import { createRuleFilePath } from "../../config/markdown/utils";
|
||||
|
||||
export type CreateRuleBlockArgs = Pick<
|
||||
Required<RuleWithSource>,
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
import { ToolImpl } from ".";
|
||||
import { parseMarkdownRule } from "../../config/markdown/parseMarkdownRule";
|
||||
|
||||
export const requestRuleImpl: ToolImpl = async (args, extras) => {
|
||||
// Find the rule by name in the config
|
||||
|
@ -11,14 +10,11 @@ export const requestRuleImpl: ToolImpl = async (args, extras) => {
|
|||
);
|
||||
}
|
||||
|
||||
const fileContent = await extras.ide.readFile(rule.ruleFile);
|
||||
const { markdown, frontmatter } = parseMarkdownRule(fileContent);
|
||||
|
||||
return [
|
||||
{
|
||||
name: frontmatter.name ?? "",
|
||||
description: frontmatter.description ?? "",
|
||||
content: markdown,
|
||||
name: rule.name ?? "",
|
||||
description: rule.description ?? "",
|
||||
content: rule.rule,
|
||||
uri: {
|
||||
type: "file",
|
||||
value: rule.ruleFile,
|
||||
|
|
|
@ -10,6 +10,16 @@ import {
|
|||
|
||||
const asyncExec = util.promisify(childProcess.exec);
|
||||
|
||||
// Add color-supporting environment variables
|
||||
const getColorEnv = () => ({
|
||||
...process.env,
|
||||
FORCE_COLOR: "1",
|
||||
COLORTERM: "truecolor",
|
||||
TERM: "xterm-256color",
|
||||
CLICOLOR: "1",
|
||||
CLICOLOR_FORCE: "1",
|
||||
});
|
||||
|
||||
const ENABLED_FOR_REMOTES = [
|
||||
"",
|
||||
"local",
|
||||
|
@ -56,10 +66,11 @@ export const runTerminalCommandImpl: ToolImpl = async (args, extras) => {
|
|||
}
|
||||
}
|
||||
|
||||
// Use spawn instead of exec to get streaming output
|
||||
// Use spawn with color environment
|
||||
const childProc = childProcess.spawn(args.command, {
|
||||
cwd,
|
||||
shell: true,
|
||||
env: getColorEnv(), // Add enhanced environment for colors
|
||||
});
|
||||
|
||||
childProc.stdout?.on("data", (data) => {
|
||||
|
@ -130,26 +141,7 @@ export const runTerminalCommandImpl: ToolImpl = async (args, extras) => {
|
|||
return;
|
||||
}
|
||||
|
||||
if (!waitForCompletion) {
|
||||
// Already resolved, just update the UI with final output
|
||||
if (extras.onPartialOutput) {
|
||||
const status =
|
||||
code === 0 || !code
|
||||
? "\nBackground command completed"
|
||||
: `\nBackground command failed with exit code ${code}`;
|
||||
extras.onPartialOutput({
|
||||
toolCallId,
|
||||
contextItems: [
|
||||
{
|
||||
name: "Terminal",
|
||||
description: "Terminal command output",
|
||||
content: terminalOutput,
|
||||
status: status,
|
||||
},
|
||||
],
|
||||
});
|
||||
}
|
||||
} else {
|
||||
if (waitForCompletion) {
|
||||
// Normal completion, resolve now
|
||||
if (code === 0) {
|
||||
const status = "Command completed";
|
||||
|
@ -172,6 +164,25 @@ export const runTerminalCommandImpl: ToolImpl = async (args, extras) => {
|
|||
},
|
||||
]);
|
||||
}
|
||||
} else {
|
||||
// Already resolved, just update the UI with final output
|
||||
if (extras.onPartialOutput) {
|
||||
const status =
|
||||
code === 0 || !code
|
||||
? "\nBackground command completed"
|
||||
: `\nBackground command failed with exit code ${code}`;
|
||||
extras.onPartialOutput({
|
||||
toolCallId,
|
||||
contextItems: [
|
||||
{
|
||||
name: "Terminal",
|
||||
description: "Terminal command output",
|
||||
content: terminalOutput,
|
||||
status: status,
|
||||
},
|
||||
],
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
|
@ -197,14 +208,43 @@ export const runTerminalCommandImpl: ToolImpl = async (args, extras) => {
|
|||
const workspaceDirs = await extras.ide.getWorkspaceDirs();
|
||||
const cwd = fileURLToPath(workspaceDirs[0]);
|
||||
|
||||
if (!waitForCompletion) {
|
||||
if (waitForCompletion) {
|
||||
// Standard execution, waiting for completion
|
||||
try {
|
||||
// Use color environment for exec as well
|
||||
const output = await asyncExec(args.command, {
|
||||
cwd,
|
||||
env: getColorEnv(),
|
||||
});
|
||||
const status = "Command completed";
|
||||
return [
|
||||
{
|
||||
name: "Terminal",
|
||||
description: "Terminal command output",
|
||||
content: output.stdout ?? "",
|
||||
status: status,
|
||||
},
|
||||
];
|
||||
} catch (error: any) {
|
||||
const status = `Command failed with: ${error.message || error.toString()}`;
|
||||
return [
|
||||
{
|
||||
name: "Terminal",
|
||||
description: "Terminal command output",
|
||||
content: error.stderr ?? error.toString(),
|
||||
status: status,
|
||||
},
|
||||
];
|
||||
}
|
||||
} else {
|
||||
// For non-streaming but also not waiting for completion, use spawn
|
||||
// but don't attach any listeners other than error
|
||||
try {
|
||||
// Use spawn instead of exec but don't wait
|
||||
// Use spawn with color environment
|
||||
const childProc = childProcess.spawn(args.command, {
|
||||
cwd,
|
||||
shell: true,
|
||||
env: getColorEnv(), // Add color environment
|
||||
// Detach the process so it's not tied to the parent
|
||||
detached: true,
|
||||
// Redirect to /dev/null equivalent (works cross-platform)
|
||||
|
@ -246,30 +286,6 @@ export const runTerminalCommandImpl: ToolImpl = async (args, extras) => {
|
|||
},
|
||||
];
|
||||
}
|
||||
} else {
|
||||
// Standard execution, waiting for completion
|
||||
try {
|
||||
const output = await asyncExec(args.command, { cwd });
|
||||
const status = "Command completed";
|
||||
return [
|
||||
{
|
||||
name: "Terminal",
|
||||
description: "Terminal command output",
|
||||
content: output.stdout ?? "",
|
||||
status: status,
|
||||
},
|
||||
];
|
||||
} catch (error: any) {
|
||||
const status = `Command failed with: ${error.message || error.toString()}`;
|
||||
return [
|
||||
{
|
||||
name: "Terminal",
|
||||
description: "Terminal command output",
|
||||
content: error.stderr ?? error.toString(),
|
||||
status: status,
|
||||
},
|
||||
];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
import { execSync } from "child_process";
|
||||
import { homedir } from "os";
|
||||
|
||||
/**
|
||||
* Gets the PATH env var from the user's login shell on non-Windows platforms.
|
||||
* Windows is not implemented primarily because it is not needed at the moment.
|
||||
* @returns The enhanced PATH from the user's shell, or the current PATH if it cannot be determined
|
||||
*/
|
||||
export function getEnvPathFromUserShell(): string | undefined {
|
||||
if (process.platform === "win32") {
|
||||
console.warn(`${getEnvPathFromUserShell.name} not implemented for Windows`);
|
||||
|
@ -16,7 +12,8 @@ export function getEnvPathFromUserShell(): string | undefined {
|
|||
}
|
||||
|
||||
try {
|
||||
const command = `${process.env.SHELL} -l -c 'echo $PATH'`;
|
||||
// Source common profile files
|
||||
const command = `${process.env.SHELL} -l -c 'for f in ~/.zprofile ~/.zshrc ~/.bash_profile ~/.bashrc; do [ -f "$f" ] && source "$f" 2>/dev/null; done; echo $PATH'`;
|
||||
|
||||
const stdout = execSync(command, {
|
||||
encoding: "utf8",
|
||||
|
|
|
@ -181,7 +181,7 @@ export async function getQueryForFile(
|
|||
}
|
||||
|
||||
const sourcePath = path.join(
|
||||
__dirname,
|
||||
process.env.NODE_ENV === "test" ? process.cwd() : __dirname,
|
||||
"..",
|
||||
...(process.env.NODE_ENV === "test"
|
||||
? ["extensions", "vscode", "tree-sitter"]
|
||||
|
@ -201,7 +201,7 @@ async function loadLanguageForFileExt(
|
|||
fileExtension: string,
|
||||
): Promise<Language> {
|
||||
const wasmPath = path.join(
|
||||
__dirname,
|
||||
process.env.NODE_ENV === "test" ? process.cwd() : __dirname,
|
||||
...(process.env.NODE_ENV === "test"
|
||||
? ["node_modules", "tree-sitter-wasms", "out"]
|
||||
: ["tree-sitter-wasms"]),
|
||||
|
|
|
@ -2,6 +2,13 @@ import { defineConfig } from "vitest/config";
|
|||
|
||||
export default defineConfig({
|
||||
test: {
|
||||
testTransformMode: {
|
||||
web: ["/.[jt]s?$/"],
|
||||
ssr: ["/.[jt]s?$/"],
|
||||
},
|
||||
globalSetup: "./test/vitest.global-setup.ts",
|
||||
setupFiles: "./test/vitest.setup.ts",
|
||||
fileParallelism: false,
|
||||
include: ["**/*.vitest.ts"],
|
||||
},
|
||||
});
|
||||
|
|
|
@ -7,45 +7,175 @@ keywords: [tool, use, function calling, claude, automatic]
|
|||
import TabItem from "@theme/TabItem";
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
||||
## MCP
|
||||
# MCP
|
||||
|
||||
Currently custom tools can be configured using the [Model Context Protocol](https://modelcontextprotocol.io/introduction), a standard proposed by Anthropic to unify prompts, context, and tool use.
|
||||
Even as AI systems improve, they remain limited by their training data and
|
||||
cannot access real-time information or specialized tools. The [Model Context
|
||||
Protocol](https://modelcontextprotocol.io/introduction) (MCP) addresses this by
|
||||
letting AI models connect with outside data sources, tools, and environments.
|
||||
This allows information and capabilities to flow smoothly between AI systems and
|
||||
the wider digital world. The standard, created by Anthropic to bring together
|
||||
prompts, context, and tool use, is key to building truly useful AI experiences
|
||||
that can be extended with custom tools.
|
||||
|
||||
MCP Servers can be added to hub Assistants using `mcpServers` blocks. You can explore available MCP server blocks [here](https://hub.continue.dev/explore/mcp).
|
||||
## How it works
|
||||
|
||||
Currently, custom tools can be configured using the Model Context
|
||||
Protocol standard, which unifies prompts, context, and tool use.
|
||||
|
||||
MCP Servers can be added to hub Assistants using `mcpServers` blocks. You can
|
||||
explore available MCP server blocks
|
||||
[here](https://hub.continue.dev/explore/mcp).
|
||||
|
||||
:::info
|
||||
MCP can only be used in **agent** mode.
|
||||
:::
|
||||
|
||||
To set up your own MCP server, read the [MCP quickstart](https://modelcontextprotocol.io/quickstart) and then [create an `mcpServers` block](https://hub.continue.dev/new?type=block&blockType=mcpServers) or add the following to your [config file](./configuration.md):
|
||||
## Quick Start
|
||||
|
||||
<Tabs groupId="config-example">
|
||||
Below is a quick example of setting up a new MCP server for use in your assistant:
|
||||
|
||||
1. Create a folder called `.continue/mcpServers` at the top level of your workspace
|
||||
2. Add a file called `playwright-mcp.yaml` to this folder.
|
||||
3. Write the following contents to `playwright-mcp.yaml` and save.
|
||||
|
||||
```yaml title=".continue/mcpServers/playwright-mcp.yaml"
|
||||
name: Playwright mcpServer
|
||||
version: 0.0.1
|
||||
schema: v1
|
||||
mcpServers:
|
||||
- name: Browser search
|
||||
command: npx
|
||||
args:
|
||||
- "@playwright/mcp@latest"
|
||||
```
|
||||
|
||||
Now test your MCP server by sending the following prompt:
|
||||
|
||||
```
|
||||
Open the browser and navigate Hacker News. Save the top 10 headlines in a hn.txt file.
|
||||
```
|
||||
|
||||
The result will be a generated file called `hn.txt` in the current working directory.
|
||||
|
||||

|
||||
|
||||
## Using MCP Servers
|
||||
|
||||
To set up your own MCP server, read the [MCP
|
||||
quickstart](https://modelcontextprotocol.io/quickstart) and then [create an
|
||||
`mcpServers`
|
||||
block](https://hub.continue.dev/new?type=block&blockType=mcpServers) or add a local MCP
|
||||
server block to your [config file](./configuration.md):
|
||||
|
||||
<Tabs groupId="config.yaml">
|
||||
<TabItem value="yaml" label="YAML">
|
||||
```yaml title="config.yaml"
|
||||
mcpServers:
|
||||
- name: My MCP Server
|
||||
command: uvx
|
||||
- name: SQLite MCP
|
||||
command: npx
|
||||
args:
|
||||
- mcp-server-sqlite
|
||||
- --db-path
|
||||
- /Users/NAME/test.db
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem value="json" label="JSON">
|
||||
```json title="config.json"
|
||||
{
|
||||
"experimental": {
|
||||
"modelContextProtocolServers": [
|
||||
{
|
||||
"transport": {
|
||||
"type": "stdio",
|
||||
"command": "uvx",
|
||||
"args": ["mcp-server-sqlite", "--db-path", "/Users/NAME/test.db"]
|
||||
}
|
||||
- "-y"
|
||||
- "mcp-sqlite"
|
||||
- "/path/to/your/database.db"
|
||||
|
||||
````
|
||||
</TabItem>
|
||||
<TabItem value="json" label="JSON">
|
||||
```json title="config.json"
|
||||
{
|
||||
"experimental": {
|
||||
"modelContextProtocolServers": [
|
||||
{
|
||||
"transport": {
|
||||
"type": "stdio",
|
||||
"command": "uvx",
|
||||
"args": ["mcp-server-sqlite", "--db-path", "/path/to/your/database.db"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
}
|
||||
````
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Syntax
|
||||
|
||||
MCP blocks follow the established syntax for blocks, with a few additional properties specific to MCP servers; see the type sketch after this list.
|
||||
|
||||
- `name` (**required**): A display name for the MCP server.
|
||||
- `command` (**required**): The command to run to start the MCP server.
|
||||
- `type` (optional): The type of the MCP server: `sse`, `stdio`, `streamable-http`
|
||||
- `args` (optional): Arguments to pass to the command.
|
||||
- `env` (optional): Secrets to be injected into the command as environment variables.
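
As a reading aid, the properties above can be sketched as a TypeScript shape. This is purely illustrative; the interface name is hypothetical and not part of Continue's API:

```typescript
// Hypothetical type-level sketch of a single `mcpServers` entry,
// mirroring the properties documented in the list above.
interface McpServerBlock {
  name: string; // required: display name for the MCP server
  command: string; // required: the command run to start the server
  type?: "sse" | "stdio" | "streamable-http"; // optional transport type
  args?: string[]; // optional arguments passed to the command
  env?: Record<string, string>; // optional secrets injected as env variables
}
```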
|
||||
|
||||
### Transport Types
|
||||
|
||||
MCP now supports remote server connections through HTTP-based transports, expanding beyond the traditional local stdio transport method. This enables integration with cloud-hosted MCP servers and distributed architectures.
|
||||
|
||||
#### Server-Sent Events (SSE) Transport
|
||||
|
||||
For real-time streaming communication, use the SSE transport:
|
||||
|
||||
```yaml
|
||||
mcpServers:
|
||||
- name: Name
|
||||
type: sse
|
||||
url: https://....
|
||||
```
|
||||
|
||||
#### Standard Input/Output (stdio)
|
||||
|
||||
For local MCP servers that communicate via standard input and output:
|
||||
|
||||
```yaml
|
||||
mcpServers:
|
||||
- name: Name
|
||||
type: stdio
|
||||
command: npx
|
||||
args:
|
||||
- "@modelcontextprotocol/server-sqlite"
|
||||
- "/path/to/your/database.db"
|
||||
```
|
||||
|
||||
#### Streamable HTTP Transport
|
||||
|
||||
For standard HTTP-based communication with streaming capabilities:
|
||||
|
||||
```yaml
|
||||
mcpServers:
|
||||
- name: Name
|
||||
type: streamable-http
|
||||
url: https://....
|
||||
```
|
||||
|
||||
These remote transport options allow you to connect to MCP servers hosted on remote infrastructure, enabling more flexible deployment architectures and shared server resources across multiple clients.
|
||||
|
||||
For detailed information about transport mechanisms and their use cases, refer to the official MCP documentation on [transports](https://modelcontextprotocol.io/docs/concepts/transports#server-sent-events-sse).
|
||||
|
||||
### Working with Secrets
|
||||
|
||||
Some MCP servers require API keys or other secrets. You can use locally stored environment secrets
|
||||
as well as hosted secrets in the Continue Hub. To use Hub secrets, use the `inputs` property in your MCP `env` block instead of `secrets`.
|
||||
|
||||
```yaml
|
||||
mcpServers:
|
||||
- name: Supabase MCP
|
||||
command: npx
|
||||
args:
|
||||
- -y
|
||||
- "@supabase/mcp-server-supabase@latest"
|
||||
- --access-token
|
||||
- ${{ secrets.SUPABASE_TOKEN }}
|
||||
env:
|
||||
SUPABASE_TOKEN: ${{ secrets.SUPABASE_TOKEN }}
|
||||
- name: GitHub
|
||||
command: npx
|
||||
args:
|
||||
- "-y"
|
||||
- "@modelcontextprotocol/server-github"
|
||||
env:
|
||||
GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.GITHUB_PERSONAL_ACCESS_TOKEN }}
|
||||
```
|
||||
|
|
|
@ -7,15 +7,15 @@ Before using Cohere, visit the [Cohere dashboard](https://dashboard.cohere.com/a
|
|||
|
||||
## Chat model
|
||||
|
||||
We recommend configuring **Command-R Plus** as your chat model.
|
||||
We recommend configuring **Command A** as your chat model.
|
||||
|
||||
<Tabs groupId="config-example">
|
||||
<TabItem value="yaml" label="YAML">
|
||||
```yaml title="config.yaml"
|
||||
models:
|
||||
- name: Cohere
|
||||
- name: Command A 03-2025
|
||||
provider: cohere
|
||||
model: command-r-plus
|
||||
model: command-a-03-2025
|
||||
apiKey: <YOUR_COHERE_API_KEY>
|
||||
```
|
||||
</TabItem>
|
||||
|
@ -24,9 +24,9 @@ We recommend configuring **Command-R Plus** as your chat model.
|
|||
{
|
||||
"models": [
|
||||
{
|
||||
"title": "Cohere",
|
||||
"title": "Command A 03-2025",
|
||||
"provider": "cohere",
|
||||
"model": "command-r-plus",
|
||||
"model": "command-a-03-2025",
|
||||
"apiKey": "<YOUR_COHERE_API_KEY>"
|
||||
}
|
||||
]
|
||||
|
@ -43,15 +43,15 @@ Cohere currently does not offer any autocomplete models.
|
|||
|
||||
## Embeddings model
|
||||
|
||||
We recommend configuring **embed-english-v3.0** as your embeddings model.
|
||||
We recommend configuring **embed-v4.0** as your embeddings model.
|
||||
|
||||
<Tabs groupId="config-example">
|
||||
<TabItem value="yaml" label="YAML">
|
||||
```yaml title="config.yaml"
|
||||
models:
|
||||
- name: Cohere Embed
|
||||
- name: Cohere Embed v4.0
|
||||
provider: cohere
|
||||
model: embed-english-v3.0
|
||||
model: embed-v4.0
|
||||
apiKey: <YOUR_COHERE_API_KEY>
|
||||
roles:
|
||||
- embed
|
||||
|
@ -62,7 +62,7 @@ We recommend configuring **embed-english-v3.0** as your embeddings model.
|
|||
{
|
||||
"embeddingsProvider": {
|
||||
"provider": "cohere",
|
||||
"model": "embed-english-v3.0",
|
||||
"model": "embed-v4.0",
|
||||
"apiKey": "<YOUR_COHERE_API_KEY>"
|
||||
}
|
||||
}
|
||||
|
@ -72,15 +72,15 @@ We recommend configuring **embed-english-v3.0** as your embeddings model.
|
|||
|
||||
## Reranking model
|
||||
|
||||
We recommend configuring **rerank-english-v3.0** as your reranking model.
|
||||
We recommend configuring **rerank-v3.5** as your reranking model.
|
||||
|
||||
<Tabs groupId="config-example">
|
||||
<TabItem value="yaml" label="YAML">
|
||||
```yaml title="config.yaml"
|
||||
models:
|
||||
- name: Cohere Reranker
|
||||
- name: Cohere Rerank v3.5
|
||||
provider: cohere
|
||||
model: rerank-english-v3.0
|
||||
model: rerank-v3.5
|
||||
apiKey: <YOUR_COHERE_API_KEY>
|
||||
roles:
|
||||
- rerank
|
||||
|
@ -92,7 +92,7 @@ We recommend configuring **rerank-english-v3.0** as your reranking model.
|
|||
"reranker": {
|
||||
"name": "cohere",
|
||||
"params": {
|
||||
"model": "rerank-english-v3.0",
|
||||
"model": "rerank-v3.5",
|
||||
"apiKey": "<YOUR_COHERE_API_KEY>"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -100,6 +100,8 @@ We recommend configuring **Nomic Embed Text** as your embeddings model.
|
|||
|
||||
## Reranking model
|
||||
|
||||
Continue automatically handles vLLM's response format (which uses `results` instead of `data`).
|
||||
|
||||
[Click here](../../model-roles/reranking.mdx) to see a list of reranking model providers.
|
||||
|
||||
The Continue implementation uses [OpenAI](../top-level/openai.mdx) under the hood. [View the source](https://github.com/continuedev/continue/blob/main/core/llm/llms/Vllm.ts)
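
As a rough illustration of the normalization described above, here is a minimal TypeScript sketch. It is not Continue's actual code; the helper name and types are hypothetical:

```typescript
// Accepts either the OpenAI-style `data` array or vLLM's `results`
// array and returns relevance scores in the original document order.
interface RerankItem {
  index: number;
  relevance_score: number;
}

function normalizeRerankScores(resp: {
  data?: RerankItem[];
  results?: RerankItem[];
}): number[] {
  const items = resp.data ?? resp.results;
  if (!Array.isArray(items)) {
    throw new Error("Unexpected rerank response: no 'data' or 'results' array");
  }
  return [...items]
    .sort((a, b) => a.index - b.index)
    .map((item) => item.relevance_score);
}
```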
|
||||
|
|
|
@ -12,15 +12,15 @@ You can get an API key from the [Anthropic console](https://console.anthropic.co
|
|||
|
||||
## Chat model
|
||||
|
||||
We recommend configuring **Claude 3.5 Sonnet** as your chat model.
|
||||
We recommend configuring **Claude 4 Sonnet** as your chat model.
|
||||
|
||||
<Tabs groupId="config-example">
|
||||
<TabItem value="yaml" label="YAML">
|
||||
```yaml title="config.yaml"
|
||||
models:
|
||||
- name: Claude 3.5 Sonnet
|
||||
- name: Claude 4 Sonnet
|
||||
provider: anthropic
|
||||
model: claude-3-5-sonnet-latest
|
||||
model: claude-sonnet-4-20250514
|
||||
apiKey: <YOUR_ANTHROPIC_API_KEY>
|
||||
```
|
||||
</TabItem>
|
||||
|
@ -29,9 +29,9 @@ We recommend configuring **Claude 3.5 Sonnet** as your chat model.
|
|||
{
|
||||
"models": [
|
||||
{
|
||||
"title": "Claude 3.5 Sonnet",
|
||||
"title": "Claude 4 Sonnet",
|
||||
"provider": "anthropic",
|
||||
"model": "claude-3-5-sonnet-latest",
|
||||
"model": "claude-sonnet-4-latest",
|
||||
"apiKey": "<YOUR_ANTHROPIC_API_KEY>"
|
||||
}
|
||||
]
|
||||
|
@ -60,9 +60,16 @@ Anthropic currently does not offer any reranking models.
|
|||
|
||||
## Prompt caching
|
||||
|
||||
Anthropic supports [prompt caching with Claude](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching).
|
||||
Anthropic supports [prompt caching with Claude](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching), which allows Claude models to cache system messages and conversation history between requests to improve performance and reduce costs.
|
||||
|
||||
To enable caching of the system message and the turn-by-turn conversation, update your your model configuration as following:
|
||||
Prompt caching is generally available for:
|
||||
|
||||
- Claude 4 Sonnet
|
||||
- Claude 3.7 Sonnet
|
||||
- Claude 3.5 Sonnet
|
||||
- Claude 3.5 Haiku
|
||||
|
||||
To enable caching of the system message and the turn-by-turn conversation, update your model configuration as follows:
|
||||
|
||||
<Tabs groupId="config-example">
|
||||
<TabItem value="yaml" label="YAML">
|
||||
|
@ -70,13 +77,12 @@ To enable caching of the system message and the turn-by-turn conversation, updat
|
|||
models:
|
||||
- name: Anthropic
|
||||
provider: anthropic
|
||||
model: claude-3-5-sonnet-latest
|
||||
model: claude-sonnet-4-20250514
|
||||
apiKey: <YOUR_ANTHROPIC_API_KEY>
|
||||
roles:
|
||||
- chat
|
||||
cacheBehavior:
|
||||
cacheSystemMessage: true
|
||||
cacheConversation: true
|
||||
defaultCompletionOptions:
|
||||
promptCaching: true
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem value="json" label="JSON">
|
||||
|
@ -90,7 +96,10 @@ To enable caching of the system message and the turn-by-turn conversation, updat
|
|||
},
|
||||
"title": "Anthropic",
|
||||
"provider": "anthropic",
|
||||
"model": "claude-3-5-sonnet-latest",
|
||||
"model": "claude-sonnet-4-latest",
|
||||
"defaultCompletionOptions": {
|
||||
"promptCaching": true
|
||||
},
|
||||
"apiKey": "<YOUR_ANTHROPIC_API_KEY>"
|
||||
}
|
||||
]
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
title: Apply Role
|
||||
description: Apply model role
|
||||
keywords: [apply, model, role]
|
||||
sidebar_position: 4
|
||||
---
|
||||
|
||||
When editing code, Chat and Edit model output often doesn't clearly align with existing code. A model with the `apply` role is used to generate a more precise diff to apply changes to a file.
|
||||
|
|
|
@ -162,6 +162,12 @@ Most Continue features will work normally, including autocomplete and chat. Howe
|
|||
|
||||
For more details about this requirement, see the [LanceDB issue #2195](https://github.com/lancedb/lance/issues/2195).
|
||||
|
||||
### How do I reset the state of the extension?
|
||||
|
||||
Continue stores its data in the `~/.continue` directory (`%USERPROFILE%\.continue` on Windows).
|
||||
|
||||
If you'd like to perform a clean reset of the extension, including removing all configuration files, indexes, and other data, you can delete this directory, uninstall the extension, and then reinstall it.
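
If you prefer to script the directory removal, a minimal Node.js sketch of that step might look like the following (assuming the default locations above; back up anything you want to keep first):

```typescript
// Deletes the Continue data directory as part of a clean reset.
// The uninstall/reinstall steps still happen in your editor.
import * as fs from "fs";
import * as os from "os";
import * as path from "path";

const continueDir = path.join(os.homedir(), ".continue");
if (fs.existsSync(continueDir)) {
  fs.rmSync(continueDir, { recursive: true, force: true });
}
```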
|
||||
|
||||
## Still having trouble?
|
||||
|
||||
You can also join our Discord community [here](https://discord.gg/vapESyrFmJ) for additional support and discussions. Alternatively, you can create a GitHub issue [here](https://github.com/continuedev/continue/issues/new?assignees=&labels=bug&projects=&template=bug-report-%F0%9F%90%9B.md&title=), providing details of your problem, and we'll be able to help you out more quickly.
|
||||
|
|
|
@ -203,6 +203,14 @@ const config = {
|
|||
},
|
||||
}),
|
||||
plugins: [
|
||||
[
|
||||
"posthog-docusaurus",
|
||||
{
|
||||
apiKey: process.env.POSTHOG_API_KEY || "DEV",
|
||||
appUrl: "https://us.i.posthog.com",
|
||||
enableInDevelopment: false,
|
||||
},
|
||||
],
|
||||
[
|
||||
"@docusaurus/plugin-client-redirects",
|
||||
{
|
||||
|
|
|
@ -4,15 +4,15 @@
|
|||
|
||||
## 聊天模型
|
||||
|
||||
我们推荐配置 **Command-R Plus** 作为你的聊天模型。
|
||||
我们推荐配置 **Command-A** 作为你的聊天模型。
|
||||
|
||||
```json title="config.json"
|
||||
{
|
||||
"models": [
|
||||
{
|
||||
"title": "Cohere",
|
||||
"title": "Command A 03-2025",
|
||||
"provider": "cohere",
|
||||
"model": "command-r-plus",
|
||||
"model": "command-a-03-2025",
|
||||
"apiKey": "YOUR_API_KEY"
|
||||
}
|
||||
]
|
||||
|
@ -27,13 +27,13 @@ Cohere 当前没有提供任何自动补全模型。
|
|||
|
||||
## 嵌入模型
|
||||
|
||||
我们推荐配置 **embed-english-v3.0** 作为你的嵌入模型。
|
||||
我们推荐配置 **embed-v4.0** 作为你的嵌入模型。
|
||||
|
||||
```json title="config.json"
|
||||
{
|
||||
"embeddingsProvider": {
|
||||
"provider": "cohere",
|
||||
"model": "embed-english-v3.0",
|
||||
"model": "embed-v4.0",
|
||||
"apiKey": "<COHERE_API_KEY>"
|
||||
}
|
||||
}
|
||||
|
@ -41,14 +41,14 @@ Cohere 当前没有提供任何自动补全模型。
|
|||
|
||||
## 重排序模型
|
||||
|
||||
我们推荐配置 **rerank-english-v3.0** 作为你的重排序模型。
|
||||
我们推荐配置 **rerank-v3.5** 作为你的重排序模型。
|
||||
|
||||
```json title="config.json"
|
||||
{
|
||||
"embeddingsProvider": {
|
||||
"provider": "cohere",
|
||||
"params": {
|
||||
"model": "rerank-english-v3.0",
|
||||
"model": "rerank-v3.5",
|
||||
"apiKey": "<COHERE_API_KEY>"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
"@docusaurus/preset-classic": "^3.7.0",
|
||||
"@mdx-js/react": "^3.0.0",
|
||||
"clsx": "^2.1.0",
|
||||
"posthog-docusaurus": "^2.0.4",
|
||||
"prism-react-renderer": "^2.3.1",
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0"
|
||||
|
@ -14990,6 +14991,15 @@
|
|||
"postcss": "^8.4.31"
|
||||
}
|
||||
},
|
||||
"node_modules/posthog-docusaurus": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/posthog-docusaurus/-/posthog-docusaurus-2.0.4.tgz",
|
||||
"integrity": "sha512-xnEVCBovSuvQvYXGny03CDTc0yZCl7O3Mz21sJpXmE1Gvs21gM33WzWaA9Cm6WvWGYZtQy8t8/g8OjCkmTWlXA==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=10.15.1"
|
||||
}
|
||||
},
|
||||
"node_modules/pretty-error": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz",
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
"@docusaurus/preset-classic": "^3.7.0",
|
||||
"@mdx-js/react": "^3.0.0",
|
||||
"clsx": "^2.1.0",
|
||||
"posthog-docusaurus": "^2.0.4",
|
||||
"prism-react-renderer": "^2.3.1",
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0"
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 162 KiB |
|
@ -3,7 +3,7 @@ pluginGroup=com.github.continuedev.continueintellijextension
|
|||
pluginName=continue-intellij-extension
|
||||
pluginRepositoryUrl=https://github.com/continuedev/continue
|
||||
# SemVer format -> https://semver.org
|
||||
pluginVersion=1.0.22
|
||||
pluginVersion=1.0.24
|
||||
# Supported build number ranges and IntelliJ Platform versions -> https://plugins.jetbrains.com/docs/intellij/build-number-ranges.html
|
||||
pluginSinceBuild=223
|
||||
# IntelliJ Platform Properties -> https://plugins.jetbrains.com/docs/intellij/tools-gradle-intellij-plugin.html#configuration-intellij-extension
|
||||
|
|
|
@ -169,7 +169,11 @@ class ApplyToFileHandler(
|
|||
ide: IDE,
|
||||
params: ApplyToFileParams
|
||||
) {
|
||||
val editorUtils = EditorUtils.getOrOpenEditor(project, params.filepath)
|
||||
val editorUtils = if (EditorUtils.editorFileExist(params.filepath))
|
||||
EditorUtils.getOrOpenEditor(project, params.filepath)
|
||||
else
|
||||
EditorUtils.getEditorByCreateFile(project, params.filepath)
|
||||
|
||||
val diffStreamService = project.getService(DiffStreamService::class.java)
|
||||
|
||||
val handler = ApplyToFileHandler(
|
||||
|
|
|
@ -2,6 +2,7 @@ package com.github.continuedev.continueintellijextension.editor
|
|||
|
||||
import com.github.continuedev.continueintellijextension.utils.toUriOrNull
|
||||
import com.intellij.openapi.application.ApplicationManager
|
||||
import com.intellij.openapi.application.runWriteAction
|
||||
import com.intellij.openapi.command.WriteCommandAction
|
||||
import com.intellij.openapi.editor.Editor
|
||||
import com.intellij.openapi.editor.SelectionModel
|
||||
|
@ -14,6 +15,8 @@ import com.intellij.openapi.util.TextRange
|
|||
import com.intellij.openapi.vfs.VirtualFileManager
|
||||
import com.intellij.ui.JBColor
|
||||
import com.intellij.openapi.editor.ScrollType
|
||||
import com.intellij.openapi.vfs.LocalFileSystem
|
||||
import java.io.File
|
||||
|
||||
/**
|
||||
* Utility class for working with Editor instances.
|
||||
|
@ -166,5 +169,38 @@ class EditorUtils(val editor: Editor) {
|
|||
val editor = FileEditorManager.getInstance(project).selectedTextEditor ?: return null
|
||||
return EditorUtils(editor)
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether a file already exists for the given filepath
|
||||
*/
|
||||
fun editorFileExist(filepath: String?): Boolean {
|
||||
return !filepath.isNullOrEmpty() && VirtualFileManager.getInstance().findFileByUrl(filepath) != null
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates the file for the given filepath if needed, opens it in an editor, and returns an EditorUtils instance
|
||||
*/
|
||||
fun getEditorByCreateFile(project: Project, filepath: String?): EditorUtils? {
|
||||
if (!filepath.isNullOrEmpty()) {
|
||||
// create the file if it does not exist
|
||||
val ioFile = File(filepath.replace("file:///", ""))
|
||||
if (!ioFile.exists()) {
|
||||
ioFile.parentFile?.mkdirs()
|
||||
ioFile.createNewFile()
|
||||
}
|
||||
// refresh the VirtualFile so IntelliJ sees the newly created file
|
||||
val virtualFile = LocalFileSystem.getInstance().refreshAndFindFileByIoFile(ioFile)
|
||||
if (virtualFile != null) {
|
||||
ApplicationManager.getApplication().invokeAndWait {
|
||||
runWriteAction {
|
||||
FileEditorManager.getInstance(project).openFile(virtualFile, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
val editor = FileEditorManager.getInstance(project).selectedTextEditor ?: return null
|
||||
return EditorUtils(editor)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
"extends": ["../../.eslintrc.shared.json"],
|
||||
"rules": {
|
||||
"@typescript-eslint/naming-convention": "off",
|
||||
"@typescript-eslint/no-floating-promises": "warn"
|
||||
"@typescript-eslint/no-floating-promises": "warn",
|
||||
"@typescript-eslint/no-misused-promises": "warn"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -868,7 +868,21 @@
|
|||
"then": {
|
||||
"properties": {
|
||||
"model": {
|
||||
"enum": ["command-r", "command-r-plus"]
|
||||
"enum": [
|
||||
"c4ai-aya-expanse-8b",
|
||||
"c4ai-aya-expanse-32b",
|
||||
"c4ai-aya-vision-8b",
|
||||
"c4ai-aya-vision-32b",
|
||||
"command-r",
|
||||
"command-r-plus",
|
||||
"command-r-03-2024",
|
||||
"command-r-plus-04-2024",
|
||||
"command-r-08-2024",
|
||||
"command-r-plus-08-2024",
|
||||
"command-r7b-12-2024",
|
||||
"command-r7b-arabic-02-2025",
|
||||
"command-a-03-2025"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -2,7 +2,7 @@
|
|||
"name": "continue",
|
||||
"icon": "media/icon.png",
|
||||
"author": "Continue Dev, Inc",
|
||||
"version": "1.1.47",
|
||||
"version": "1.1.49",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/continuedev/continue"
|
||||
|
@ -624,6 +624,7 @@
|
|||
"lint:fix": "eslint . --ext ts --fix",
|
||||
"build-test": "npm run esbuild && node esbuild.test.mjs",
|
||||
"test": "npm run build-test && node ./out/runTestOnVSCodeHost.js",
|
||||
"vitest": "vitest run",
|
||||
"quick-test": "npm run build-test && node ./out/runTestOnVSCodeHost.js",
|
||||
"write-build-timestamp": "node scripts/write-build-timestamp.js",
|
||||
"prepackage": "node scripts/prepackage.js",
|
||||
|
@ -732,8 +733,14 @@
|
|||
"uuid": "^9.0.1",
|
||||
"uuidv4": "^6.2.13",
|
||||
"vectordb": "^0.4.20",
|
||||
"vitest": "^3.1.4",
|
||||
"vscode-languageclient": "^8.0.2",
|
||||
"ws": "^8.13.0",
|
||||
"yarn": "^1.22.21"
|
||||
},
|
||||
"overrides": {
|
||||
"vitest": {
|
||||
"@types/node": "^16.18.119"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -76,8 +76,10 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
|
|||
string,
|
||||
{ promise: Promise<string>; cancel: EventEmitter<void> }
|
||||
>();
|
||||
private _refreshInterval: NodeJS.Timeout | null = null;
|
||||
|
||||
private static EXPIRATION_TIME_MS = 1000 * 60 * 15; // 15 minutes
|
||||
private static REFRESH_INTERVAL_MS = 1000 * 60 * 10; // 10 minutes
|
||||
|
||||
private secretStorage: SecretStorage;
|
||||
|
||||
|
@ -96,6 +98,14 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
|
|||
);
|
||||
|
||||
this.secretStorage = new SecretStorage(context);
|
||||
|
||||
// Immediately refresh any existing sessions
|
||||
this.refreshSessions();
|
||||
|
||||
// Set up a regular interval to refresh tokens
|
||||
this._refreshInterval = setInterval(() => {
|
||||
this.refreshSessions();
|
||||
}, WorkOsAuthProvider.REFRESH_INTERVAL_MS);
|
||||
}
|
||||
|
||||
private decodeJwt(jwt: string): Record<string, any> | null {
|
||||
|
@ -110,16 +120,6 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
|
|||
}
|
||||
}
|
||||
|
||||
private getExpirationTimeMs(jwt: string): number {
|
||||
const decodedToken = this.decodeJwt(jwt);
|
||||
if (!decodedToken) {
|
||||
return WorkOsAuthProvider.EXPIRATION_TIME_MS;
|
||||
}
|
||||
return decodedToken.exp && decodedToken.iat
|
||||
? (decodedToken.exp - decodedToken.iat) * 1000
|
||||
: WorkOsAuthProvider.EXPIRATION_TIME_MS;
|
||||
}
|
||||
|
||||
private jwtIsExpiredOrInvalid(jwt: string): boolean {
|
||||
const decodedToken = this.decodeJwt(jwt);
|
||||
if (!decodedToken) {
|
||||
|
@ -128,13 +128,14 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
|
|||
return decodedToken.exp * 1000 < Date.now();
|
||||
}
|
||||
|
||||
private async debugAccessTokenValidity(jwt: string, refreshToken: string) {
|
||||
const expiredOrInvalid = this.jwtIsExpiredOrInvalid(jwt);
|
||||
if (expiredOrInvalid) {
|
||||
console.debug("Invalid JWT");
|
||||
} else {
|
||||
console.debug("Valid JWT");
|
||||
private getExpirationTimeMs(jwt: string): number {
|
||||
const decodedToken = this.decodeJwt(jwt);
|
||||
if (!decodedToken) {
|
||||
return WorkOsAuthProvider.EXPIRATION_TIME_MS;
|
||||
}
|
||||
return decodedToken.exp && decodedToken.iat
|
||||
? (decodedToken.exp - decodedToken.iat) * 1000
|
||||
: WorkOsAuthProvider.EXPIRATION_TIME_MS;
|
||||
}
|
||||
|
||||
private async storeSessions(value: ContinueAuthenticationSession[]) {
|
||||
|
@ -197,7 +198,12 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
|
|||
const finalSessions = [];
|
||||
for (const session of sessions) {
|
||||
try {
|
||||
const newSession = await this._refreshSession(session.refreshToken);
|
||||
// For expired tokens, don't use retries - if refresh fails, we drop the session
|
||||
const refreshMethod = this.jwtIsExpiredOrInvalid(session.accessToken)
|
||||
? this._refreshSession.bind(this) // Direct refresh for expired tokens
|
||||
: this._refreshSessionWithRetry.bind(this); // Retry for valid tokens
|
||||
|
||||
const newSession = await refreshMethod(session.refreshToken);
|
||||
finalSessions.push({
|
||||
...session,
|
||||
accessToken: newSession.accessToken,
|
||||
|
@ -205,36 +211,65 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
|
|||
expiresInMs: newSession.expiresInMs,
|
||||
});
|
||||
} catch (e: any) {
|
||||
// If the refresh token doesn't work, we just drop the session
|
||||
// If refresh fails (after retries for valid tokens), drop the session
|
||||
console.debug(`Error refreshing session token: ${e.message}`);
|
||||
await this.debugAccessTokenValidity(
|
||||
session.accessToken,
|
||||
session.refreshToken,
|
||||
);
|
||||
this._sessionChangeEmitter.fire({
|
||||
added: [],
|
||||
removed: [session],
|
||||
changed: [],
|
||||
});
|
||||
// We don't need to refresh the sessions again, since we'll get a new one when we need it
|
||||
// setTimeout(() => this._refreshSessions(), 60 * 1000);
|
||||
// return;
|
||||
}
|
||||
}
|
||||
|
||||
await this.storeSessions(finalSessions);
|
||||
this._sessionChangeEmitter.fire({
|
||||
added: [],
|
||||
removed: [],
|
||||
changed: finalSessions,
|
||||
});
|
||||
}
|
||||
|
||||
if (finalSessions[0]?.expiresInMs) {
|
||||
setTimeout(
|
||||
async () => {
|
||||
await this._refreshSessions();
|
||||
},
|
||||
(finalSessions[0].expiresInMs * 2) / 3,
|
||||
);
|
||||
private async _refreshSessionWithRetry(
|
||||
refreshToken: string,
|
||||
attempt = 1,
|
||||
maxAttempts = 3,
|
||||
baseDelay = 1000,
|
||||
): Promise<{
|
||||
accessToken: string;
|
||||
refreshToken: string;
|
||||
expiresInMs: number;
|
||||
}> {
|
||||
try {
|
||||
return await this._refreshSession(refreshToken);
|
||||
} catch (error: any) {
|
||||
// Don't retry for auth errors (401 Unauthorized) or if we've reached max attempts
|
||||
if (
|
||||
error.message?.includes("401") ||
|
||||
error.message?.includes("Invalid refresh token") ||
|
||||
error.message?.includes("Unauthorized") ||
|
||||
attempt >= maxAttempts
|
||||
) {
|
||||
throw error;
|
||||
}
|
||||
|
||||
// For network errors or transient server issues, retry with backoff
|
||||
// Calculate exponential backoff delay with jitter
|
||||
const delay =
|
||||
baseDelay * Math.pow(2, attempt - 1) * (0.5 + Math.random() * 0.5);
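// With the default baseDelay of 1000 ms this yields roughly 0.5-1 s
// before the 2nd attempt and 1-2 s before the 3rd; the jitter factor
// (0.5 + Math.random() * 0.5) spreads retries to avoid thundering herds.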
|
||||
|
||||
// Schedule retry after delay
|
||||
return new Promise((resolve, reject) => {
|
||||
setTimeout(() => {
|
||||
this._refreshSessionWithRetry(
|
||||
refreshToken,
|
||||
attempt + 1,
|
||||
maxAttempts,
|
||||
baseDelay,
|
||||
)
|
||||
.then(resolve)
|
||||
.catch(reject);
|
||||
}, delay);
|
||||
});
|
||||
}
|
||||
}

@@ -323,11 +358,6 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
      changed: [],
    });

    setTimeout(
      () => this._refreshSessions(),
      (this.getExpirationTimeMs(session.accessToken) * 2) / 3,
    );

    return session;
  } catch (e) {
    window.showErrorMessage(`Sign in failed: ${e}`);

@@ -360,6 +390,10 @@ export class WorkOsAuthProvider implements AuthenticationProvider, Disposable {
   * Dispose the registered services
   */
  public async dispose() {
    if (this._refreshInterval) {
      clearInterval(this._refreshInterval);
      this._refreshInterval = null;
    }
    this._disposable.dispose();
  }

@@ -0,0 +1,805 @@
// @ts-nocheck
import fetch from "node-fetch";
import { afterEach, beforeEach, expect, it, vi } from "vitest";
import { EventEmitter } from "vscode";

// Don't import WorkOsAuthProvider directly here

// Mock the modules we need
vi.mock("vscode", () => ({
  authentication: {
    registerAuthenticationProvider: vi.fn(),
  },
  window: {
    registerUriHandler: vi.fn(),
  },
  EventEmitter: vi.fn(() => ({
    event: { dispose: vi.fn() },
    fire: vi.fn(),
  })),
  Disposable: {
    from: vi.fn(() => ({ dispose: vi.fn() })),
  },
  env: {
    uriScheme: "vscode",
  },
}));

// Properly mock node-fetch
vi.mock("node-fetch", () => {
  return {
    __esModule: true,
    default: vi.fn(),
  };
});

vi.mock("core/control-plane/env", () => ({
  getControlPlaneEnvSync: vi.fn(() => ({
    AUTH_TYPE: "workos",
    APP_URL: "https://continue.dev",
    CONTROL_PLANE_URL: "https://api.continue.dev",
    WORKOS_CLIENT_ID: "client_123",
  })),
}));

vi.mock("crypto", () => ({
  createHash: vi.fn(() => ({
    update: vi.fn(() => ({
      digest: vi.fn(() => Buffer.from("test-hash")),
    })),
  })),
}));

// Create a simple SecretStorage mock that we can control
const mockSecretStorageGet = vi.fn();
const mockSecretStorageStore = vi.fn();

// Mock SecretStorage class
vi.mock("./SecretStorage", () => {
  return {
    SecretStorage: vi.fn().mockImplementation(() => ({
      store: mockSecretStorageStore,
      get: mockSecretStorageGet,
    })),
  };
});

// Helper to create valid and expired JWTs
function createJwt({ expired }: { expired: boolean }): string {
  const now = Math.floor(Date.now() / 1000);
  const header = { alg: "HS256", typ: "JWT" };
  const payload = {
    sub: "user123",
    iat: now,
    exp: expired ? now - 3600 : now + 3600, // Expired 1 hour ago or valid for 1 hour
  };

  const base64Header = Buffer.from(JSON.stringify(header))
    .toString("base64")
    .replace(/=/g, "");
  const base64Payload = Buffer.from(JSON.stringify(payload))
    .toString("base64")
    .replace(/=/g, "");
  const signature = "dummysignature";

  return `${base64Header}.${base64Payload}.${signature}`;
}
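
// Illustration, not part of the test file: a token's claims can be inspected
// by base64-decoding its middle segment (Node tolerates the stripped "="
// padding). The iat/exp values shown are examples; real values depend on
// Date.now() at the time of the call.
const samplePayload = JSON.parse(
  Buffer.from(createJwt({ expired: false }).split(".")[1], "base64").toString(),
); // e.g. { sub: "user123", iat: 1747000000, exp: 1747003600 }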

beforeEach(() => {
  // Set up fake timers before each test
  vi.useFakeTimers();
});

afterEach(() => {
  vi.clearAllMocks();
  vi.clearAllTimers();
  vi.useRealTimers(); // Restore real timers after each test
});

it("should refresh tokens on initialization when sessions exist", async () => {
  // Mock setInterval to prevent the refresh interval
  const originalSetInterval = global.setInterval;
  global.setInterval = vi.fn().mockReturnValue(123 as any);

  // Setup existing sessions with a valid token
  const validToken = createJwt({ expired: false });
  const mockSession = {
    id: "test-id",
    accessToken: validToken,
    refreshToken: "refresh-token",
    expiresInMs: 3600000, // 1 hour
    account: { label: "Test User", id: "user@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // Setup successful token refresh
  fetchMock.mockResolvedValueOnce({
    ok: true,
    json: async () => ({
      accessToken: createJwt({ expired: false }),
      refreshToken: "new-refresh-token",
    }),
    text: async () => "",
  });

  // Create a mock UriHandler
  const mockUriHandler = {
    event: new EventEmitter(),
    handleCallback: vi.fn(),
  };

  // Create a mock ExtensionContext
  const mockContext = {
    secrets: {
      store: vi.fn(),
      get: vi.fn(),
    },
    subscriptions: [],
  };

  // Set up our SecretStorage mock to return the session
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([mockSession]));

  // Import WorkOsAuthProvider after setting up all mocks
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");

  // Create provider instance - this will automatically call refreshSessions
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Wait for all promises to resolve, including any nested promise chains
  await new Promise(process.nextTick);

  // Verify that the token refresh endpoint was called
  expect(fetchMock).toHaveBeenCalledWith(
    expect.any(URL),
    expect.objectContaining({
      method: "POST",
      body: expect.stringContaining("refresh-token"),
    }),
  );

  // Restore setInterval
  global.setInterval = originalSetInterval;

  // Clean up
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

it("should not remove sessions during transient network errors", async () => {
  // Setup existing sessions with a valid token
  const validToken = createJwt({ expired: false });
  const mockSession = {
    id: "test-id",
    accessToken: validToken,
    refreshToken: "refresh-token",
    expiresInMs: 300000, // 5 minutes
    account: { label: "Test User", id: "user@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // First refresh attempt fails with network error
  fetchMock.mockRejectedValueOnce(new Error("Network error"));

  // Second refresh attempt should succeed
  fetchMock.mockResolvedValueOnce({
    ok: true,
    json: async () => ({
      accessToken: createJwt({ expired: false }),
      refreshToken: "new-refresh-token",
    }),
    text: async () => "",
  });

  // Create a mock UriHandler
  const mockUriHandler = {
    event: new EventEmitter(),
    handleCallback: vi.fn(),
  };

  // Create a mock ExtensionContext
  const mockContext = {
    secrets: {
      store: vi.fn(),
      get: vi.fn(),
    },
    subscriptions: [],
  };

  // Set up our SecretStorage mock to return the session
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([mockSession]));

  // Import WorkOsAuthProvider after setting up all mocks
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");

  // Allow setInterval to work normally with fake timers
  // We're not mocking it anymore; instead we'll control when it fires

  // Create provider instance - this will automatically call refreshSessions with the network error
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Run microtasks to process promises from the initial refresh call
  await Promise.resolve();

  // Check that sessions were not cleared after network error
  expect(mockSecretStorageStore).not.toHaveBeenCalledWith(
    expect.anything(),
    expect.stringMatching(/\[\]/),
  );

  // Reset the fetch mock call count to verify the next call
  fetchMock.mockClear();

  // Advance timers to the next refresh interval to simulate the timer firing
  vi.advanceTimersByTime(WorkOsAuthProvider.REFRESH_INTERVAL_MS);

  // Run microtasks to process promises from the interval-triggered refresh
  await Promise.resolve();

  // Verify the second attempt was made via the interval
  expect(fetchMock).toHaveBeenCalledTimes(1);
  expect(fetchMock).toHaveBeenCalledWith(
    expect.any(URL),
    expect.objectContaining({
      method: "POST",
      headers: expect.objectContaining({
        "Content-Type": "application/json",
      }),
      body: expect.stringContaining("refresh-token"),
    }),
  );

  // Clean up
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

it("should refresh tokens at regular intervals rather than based on expiration", async () => {
  // Setup existing sessions with a valid token
  const validToken = createJwt({ expired: false });
  const mockSession = {
    id: "test-id",
    accessToken: validToken,
    refreshToken: "refresh-token",
    expiresInMs: 3600000, // 1 hour
    account: { label: "Test User", id: "user@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // Setup successful token refresh responses for multiple calls
  fetchMock.mockResolvedValue({
    ok: true,
    json: async () => ({
      accessToken: createJwt({ expired: false }),
      refreshToken: "new-refresh-token",
    }),
    text: async () => "",
  });

  // Create a mock UriHandler
  const mockUriHandler = {
    event: new EventEmitter(),
    handleCallback: vi.fn(),
  };

  // Create a mock ExtensionContext
  const mockContext = {
    secrets: {
      store: vi.fn(),
      get: vi.fn(),
    },
    subscriptions: [],
  };

  // Set up our SecretStorage mock to return the session
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([mockSession]));

  // Import WorkOsAuthProvider after setting up all mocks
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");

  // Capture the original setInterval to restore it later
  const originalSetInterval = global.setInterval;

  // Create our own implementation of setInterval that we can control better
  let intervalCallback: Function;
  global.setInterval = vi.fn((callback, ms) => {
    intervalCallback = callback;
    return 123 as any; // Return a dummy interval ID
  });

  // Create provider instance - this will automatically call refreshSessions
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Wait for all promises to resolve, including any nested promise chains
  await new Promise(process.nextTick);

  // First refresh should happen immediately on initialization
  expect(fetchMock).toHaveBeenCalledTimes(1);
  fetchMock.mockClear();

  // Verify that setInterval was called to set up regular refreshes
  expect(global.setInterval).toHaveBeenCalled();

  // Get the interval time from the call to setInterval
  const intervalTime = (global.setInterval as any).mock.calls[0][1];

  // Should be a reasonable interval (less than the expiration time)
  expect(intervalTime).toBeLessThan(mockSession.expiresInMs);

  // Now manually trigger the interval callback - First interval
  intervalCallback();

  // Wait for all promises to resolve
  await new Promise(process.nextTick);

  // Verify that refresh was called again when the interval callback fired
  expect(fetchMock).toHaveBeenCalledTimes(1);

  // Check that we're making refresh calls to the right endpoint with the right data
  expect(fetchMock).toHaveBeenCalledWith(
    expect.objectContaining({ pathname: "/auth/refresh" }),
    expect.objectContaining({
      method: "POST",
      headers: expect.objectContaining({
        "Content-Type": "application/json",
      }),
      body: expect.stringContaining("refresh-token"),
    }),
  );

  // Clear mock calls for the second interval test
  fetchMock.mockClear();

  // Trigger the callback again - Second interval
  intervalCallback();

  // Wait for all promises to resolve
  await new Promise(process.nextTick);

  // Verify the refresh was called a second time
  expect(fetchMock).toHaveBeenCalledTimes(1);

  // Verify the second call has the same correct parameters
  expect(fetchMock).toHaveBeenCalledWith(
    expect.objectContaining({ pathname: "/auth/refresh" }),
    expect.objectContaining({
      method: "POST",
      headers: expect.objectContaining({
        "Content-Type": "application/json",
      }),
      body: expect.stringContaining("refresh-token"),
    }),
  );

  // Restore the original setInterval
  global.setInterval = originalSetInterval;

  // Clean up
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

it("should remove session if token refresh fails with authentication error", async () => {
  // Setup existing sessions with a valid token
  const validToken = createJwt({ expired: false });
  const mockSession = {
    id: "test-id",
    accessToken: validToken,
    refreshToken: "invalid-refresh-token",
    expiresInMs: 300000, // 5 minutes
    account: { label: "Test User", id: "user@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // Setup refresh to fail with 401 unauthorized
  fetchMock.mockResolvedValueOnce({
    ok: false,
    status: 401,
    text: async () => "Invalid refresh token",
  });

  // Create a mock UriHandler
  const mockUriHandler = {
    event: new EventEmitter(),
    handleCallback: vi.fn(),
  };

  // Create a mock ExtensionContext
  const mockContext = {
    secrets: {
      store: vi.fn(),
      get: vi.fn(),
    },
    subscriptions: [],
  };

  // Mock setInterval to prevent continuous refreshes
  const originalSetInterval = global.setInterval;
  global.setInterval = vi.fn().mockReturnValue(123 as any);

  // Set up our SecretStorage mock to return the session
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([mockSession]));
  mockSecretStorageStore.mockClear();

  // Import WorkOsAuthProvider after setting up all mocks
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");

  // Create provider instance - this will automatically call refreshSessions
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Wait for all promises to resolve, including any nested promise chains
  await new Promise(process.nextTick);

  // Verify that the token refresh endpoint was called
  expect(fetchMock).toHaveBeenCalledWith(
    expect.any(URL),
    expect.objectContaining({
      method: "POST",
      body: expect.stringContaining("invalid-refresh-token"),
    }),
  );

  // Verify sessions were removed due to auth error
  expect(mockSecretStorageStore).toHaveBeenCalledWith(
    "workos.sessions", // Use the hard-coded key that matches our mock
    expect.stringMatching(/\[\]/),
  );

  // Restore setInterval
  global.setInterval = originalSetInterval;

  // Clean up
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

it("should remove session if token refresh returns Unauthorized error message", async () => {
  // Setup existing sessions with a valid token
  const validToken = createJwt({ expired: false });
  const mockSession = {
    id: "test-id",
    accessToken: validToken,
    refreshToken: "invalid-refresh-token",
    expiresInMs: 300000, // 5 minutes
    account: { label: "Test User", id: "user@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // Setup refresh to return an error containing "Unauthorized" in the message
  // Status code doesn't matter here; what matters is the error message text
  fetchMock.mockResolvedValueOnce({
    ok: false,
    status: 403, // Could be any error code
    text: async () => "Unauthorized",
  });

  // Create a mock UriHandler
  const mockUriHandler = {
    event: new EventEmitter(),
    handleCallback: vi.fn(),
  };

  // Create a mock ExtensionContext
  const mockContext = {
    secrets: {
      store: vi.fn(),
      get: vi.fn(),
    },
    subscriptions: [],
  };

  // Mock setInterval to prevent continuous refreshes
  const originalSetInterval = global.setInterval;
  global.setInterval = vi.fn().mockReturnValue(123 as any);

  // Set up our SecretStorage mock to return the session
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([mockSession]));
  mockSecretStorageStore.mockClear();

  // Import WorkOsAuthProvider after setting up all mocks
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");

  // Create provider instance - this will automatically call refreshSessions
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Wait for all promises to resolve, including any nested promise chains
  await new Promise(process.nextTick);

  // Verify that the token refresh endpoint was called
  expect(fetchMock).toHaveBeenCalledWith(
    expect.any(URL),
    expect.objectContaining({
      method: "POST",
      body: expect.stringContaining("invalid-refresh-token"),
    }),
  );

  // Verify sessions were removed due to Unauthorized error message
  expect(mockSecretStorageStore).toHaveBeenCalledWith(
    "workos.sessions", // Use the hard-coded key that matches our mock
    expect.stringMatching(/\[\]/),
  );

  // Restore setInterval
  global.setInterval = originalSetInterval;

  // Clean up
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

it("should preserve valid tokens during network errors by retrying", async () => {
  // Mock Date.now to return a fixed timestamp for token validation
  const originalDateNow = Date.now;
  const currentTimestamp = Date.now();
  Date.now = vi.fn(() => currentTimestamp);

  // Setup with a valid token
  const validToken = createJwt({ expired: false });
  const validSession = {
    id: "valid-id",
    accessToken: validToken,
    refreshToken: "valid-refresh-token",
    expiresInMs: 3600000,
    account: { label: "Valid User", id: "valid@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // Create mock objects
  const mockUriHandler = { event: new EventEmitter(), handleCallback: vi.fn() };
  const mockContext = {
    secrets: { store: vi.fn(), get: vi.fn() },
    subscriptions: [],
  };

  // Mock setInterval and setTimeout
  const originalSetInterval = global.setInterval;
  global.setInterval = vi.fn().mockReturnValue(123 as any);

  const originalSetTimeout = global.setTimeout;
  global.setTimeout = vi.fn((callback) => {
    callback();
    return 123 as any;
  });

  // Network error followed by success
  fetchMock.mockRejectedValueOnce(new Error("Network error"));
  fetchMock.mockResolvedValueOnce({
    ok: true,
    json: async () => ({
      accessToken: createJwt({ expired: false }),
      refreshToken: "new-refresh-token",
    }),
    text: async () => "",
  });

  // Setup storage
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([validSession]));
  mockSecretStorageStore.mockClear();

  // Import and create provider
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Wait for promises to resolve
  await new Promise(process.nextTick);

  // Check that a non-empty session array was stored (session was preserved)
  const storeCall = mockSecretStorageStore.mock.calls[0];
  expect(storeCall[0]).toBe("workos.sessions");
  expect(JSON.parse(storeCall[1])).toHaveLength(1); // Should contain one session

  // Restore originals
  global.setTimeout = originalSetTimeout;
  global.setInterval = originalSetInterval;
  Date.now = originalDateNow;

  // Clean up provider
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

it("should remove expired tokens when refresh fails", async () => {
  // Mock Date.now to return a time that makes tokens appear expired
  const originalDateNow = Date.now;
  const futureTime = Date.now() + 7200000; // 2 hours in the future
  Date.now = vi.fn(() => futureTime);

  // Setup with an expired token
  const expiredToken = createJwt({ expired: true });
  const expiredSession = {
    id: "expired-id",
    accessToken: expiredToken,
    refreshToken: "expired-refresh-token",
    expiresInMs: 3600000,
    account: { label: "Expired User", id: "expired@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // Create mock objects
  const mockUriHandler = { event: new EventEmitter(), handleCallback: vi.fn() };
  const mockContext = {
    secrets: { store: vi.fn(), get: vi.fn() },
    subscriptions: [],
  };

  // Mock setInterval
  const originalSetInterval = global.setInterval;
  global.setInterval = vi.fn().mockReturnValue(123 as any);

  // Refresh will fail with network error
  fetchMock.mockRejectedValueOnce(new Error("Network error"));

  // Setup storage
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([expiredSession]));
  mockSecretStorageStore.mockClear();

  // Import and create provider
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Wait for promises to resolve
  await new Promise(process.nextTick);

  // Check that an empty session array was stored (session was removed)
  const storeCall = mockSecretStorageStore.mock.calls[0];
  expect(storeCall[0]).toBe("workos.sessions");
  expect(JSON.parse(storeCall[1])).toHaveLength(0); // Should be empty

  // Restore originals
  global.setInterval = originalSetInterval;
  Date.now = originalDateNow;

  // Clean up provider
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

it("should implement exponential backoff for failed refresh attempts", async () => {
  // Setup existing sessions with a valid token
  const validToken = createJwt({ expired: false });
  const mockSession = {
    id: "test-id",
    accessToken: validToken,
    refreshToken: "refresh-token",
    expiresInMs: 300000, // 5 minutes
    account: { label: "Test User", id: "user@example.com" },
    scopes: [],
    loginNeeded: false,
  };

  // Setup fetch mock
  const fetchMock = fetch as any;
  fetchMock.mockClear();

  // Setup repeated network errors followed by success
  fetchMock.mockRejectedValueOnce(new Error("Network error 1"));
  fetchMock.mockRejectedValueOnce(new Error("Network error 2"));
  fetchMock.mockResolvedValueOnce({
    ok: true,
    json: async () => ({
      accessToken: createJwt({ expired: false }),
      refreshToken: "new-refresh-token",
    }),
    text: async () => "",
  });

  // Create a mock UriHandler
  const mockUriHandler = {
    event: new EventEmitter(),
    handleCallback: vi.fn(),
  };

  // Create a mock ExtensionContext
  const mockContext = {
    secrets: {
      store: vi.fn(),
      get: vi.fn(),
    },
    subscriptions: [],
  };

  // Mock setInterval to prevent continuous refreshes
  const originalSetInterval = global.setInterval;
  global.setInterval = vi.fn().mockReturnValue(123 as any);

  // Track setTimeout calls
  const setTimeoutSpy = vi.spyOn(global, "setTimeout");

  // Set up our SecretStorage mock to return the session
  mockSecretStorageGet.mockResolvedValue(JSON.stringify([mockSession]));

  // Import WorkOsAuthProvider after setting up all mocks
  const { WorkOsAuthProvider } = await import("./WorkOsAuthProvider");

  // Create provider instance - this will automatically call refreshSessions
  const provider = new WorkOsAuthProvider(mockContext, mockUriHandler);

  // Wait for all promises to resolve for the initial refresh attempt
  await new Promise(process.nextTick);

  // Verify the first fetch attempt was made
  expect(fetchMock).toHaveBeenCalledTimes(1);

  // Trigger first retry
  vi.advanceTimersByTime(1000); // Initial backoff
  await new Promise(process.nextTick);

  // Verify the second fetch attempt was made
  expect(fetchMock).toHaveBeenCalledTimes(2);

  // Trigger second retry
  vi.advanceTimersByTime(2000); // Double the backoff
  await new Promise(process.nextTick);

  // Verify the third fetch attempt was made
  expect(fetchMock).toHaveBeenCalledTimes(3);

  // Verify setTimeout was called with increasing delays
  expect(setTimeoutSpy).toHaveBeenCalledTimes(2);

  // Check that the backoff periods increased
  const firstDelay = setTimeoutSpy.mock.calls[0][1];
  const secondDelay = setTimeoutSpy.mock.calls[1][1];

  // Check that backoff increased
  expect(secondDelay).toBeGreaterThan(firstDelay);

  // Restore setInterval
  global.setInterval = originalSetInterval;

  // Clean up
  if (provider._refreshInterval) {
    clearInterval(provider._refreshInterval);
    provider._refreshInterval = null;
  }
});

@@ -0,0 +1,8 @@
import { defineConfig } from "vitest/config";

export default defineConfig({
  test: {
    include: ["**/*.vitest.ts"],
    environment: "node",
  },
});
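
// Note (illustrative, not part of the diff): with this config, only files
// matching **/*.vitest.ts are collected, such as the WorkOsAuthProvider tests
// above, presumably keeping them separate from any other test runner in the
// workspace. Assuming vitest is installed, they can be run with:
//   npx vitest run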

@@ -25,10 +25,12 @@
    "@tiptap/starter-kit": "^2.1.13",
    "@tiptap/suggestion": "^2.1.13",
    "@types/uuid": "^10.0.0",
    "anser": "^2.3.2",
    "clsx": "^2.1.1",
    "core": "file:../core",
    "dompurify": "^3.0.6",
    "downshift": "^7.6.0",
    "escape-carriage": "^1.3.1",
    "lodash": "^4.17.21",
    "lowlight": "^3.3.0",
    "minisearch": "^7.0.2",

@@ -212,7 +214,7 @@
    },
    "../packages/config-yaml": {
      "name": "@continuedev/config-yaml",
      "version": "1.0.88",
      "version": "1.0.92",
      "license": "Apache-2.0",
      "dependencies": {
        "@continuedev/config-types": "^1.0.14",

@@ -3737,6 +3739,12 @@
        "url": "https://github.com/sponsors/epoberezkin"
      }
    },
    "node_modules/anser": {
      "version": "2.3.2",
      "resolved": "https://registry.npmjs.org/anser/-/anser-2.3.2.tgz",
      "integrity": "sha512-PMqBCBvrOVDRqLGooQb+z+t1Q0PiPyurUQeZRR5uHBOVZcW8B04KMmnT12USnhpNX2wCPagWzLVppQMUG3u0Dw==",
      "license": "MIT"
    },
    "node_modules/ansi-regex": {
      "version": "5.0.1",
      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",

@@ -5200,6 +5208,12 @@
        "node": ">=6"
      }
    },
    "node_modules/escape-carriage": {
      "version": "1.3.1",
      "resolved": "https://registry.npmjs.org/escape-carriage/-/escape-carriage-1.3.1.tgz",
      "integrity": "sha512-GwBr6yViW3ttx1kb7/Oh+gKQ1/TrhYwxKqVmg5gS+BK+Qe2KrOa/Vh7w3HPBvgGf0LfcDGoY9I6NHKoA5Hozhw==",
      "license": "MIT"
    },
    "node_modules/escape-string-regexp": {
      "version": "4.0.0",
      "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",

@@ -34,10 +34,12 @@
    "@tiptap/starter-kit": "^2.1.13",
    "@tiptap/suggestion": "^2.1.13",
    "@types/uuid": "^10.0.0",
    "anser": "^2.3.2",
    "clsx": "^2.1.1",
    "core": "file:../core",
    "dompurify": "^3.0.6",
    "downshift": "^7.6.0",
    "escape-carriage": "^1.3.1",
    "lodash": "^4.17.21",
    "lowlight": "^3.3.0",
    "minisearch": "^7.0.2",
@@ -0,0 +1,279 @@
import Anser, { AnserJsonEntry } from "anser";
import { escapeCarriageReturn } from "escape-carriage";
import * as React from "react";
import styled from "styled-components";
import {
  defaultBorderRadius,
  vscBackground,
  vscEditorBackground,
  vscForeground,
} from "../../components";
import { getFontSize } from "../../util";

const AnsiSpan = styled.span<{
  bg?: string;
  fg?: string;
  decoration?: string;
}>`
  ${({ bg }) => bg && `background-color: rgb(${bg});`}
  ${({ fg }) => fg && `color: rgb(${fg});`}
  ${({ decoration }) => {
    switch (decoration) {
      case "bold":
        return "font-weight: bold;";
      case "dim":
        return "opacity: 0.5;";
      case "italic":
        return "font-style: italic;";
      case "hidden":
        return "visibility: hidden;";
      case "strikethrough":
        return "text-decoration: line-through;";
      case "underline":
        return "text-decoration: underline;";
      case "blink":
        return "text-decoration: blink;";
      default:
        return "";
    }
  }}
`;

const AnsiLink = styled.a`
  color: var(--vscode-textLink-foreground, #3794ff);
  text-decoration: none;
  &:hover {
    text-decoration: underline;
  }
`;

// Using the same styled component structure as StyledMarkdown
const StyledAnsi = styled.div<{
  fontSize?: number;
  whiteSpace: string;
  bgColor: string;
}>`
  pre {
    white-space: ${(props) => props.whiteSpace};
    background-color: ${vscEditorBackground};
    border-radius: ${defaultBorderRadius};
    border: 1px solid
      var(--vscode-editorWidget-border, rgba(127, 127, 127, 0.3));
    max-width: calc(100vw - 24px);
    overflow-x: scroll;
    overflow-y: hidden;
    padding: 8px;
  }

  code {
    span.line:empty {
      display: none;
    }
    word-wrap: break-word;
    border-radius: ${defaultBorderRadius};
    background-color: ${vscEditorBackground};
    font-size: ${getFontSize() - 2}px;
    font-family: var(--vscode-editor-font-family);
  }

  code:not(pre > code) {
    font-family: var(--vscode-editor-font-family);
    color: var(--vscode-input-placeholderForeground);
  }

  background-color: ${(props) => props.bgColor};
  font-family:
    var(--vscode-font-family),
    system-ui,
    -apple-system,
    BlinkMacSystemFont,
    "Segoe UI",
    Roboto,
    Oxygen,
    Ubuntu,
    Cantarell,
    "Open Sans",
    "Helvetica Neue",
    sans-serif;
  font-size: ${(props) => props.fontSize || getFontSize()}px;
  padding-left: 8px;
  padding-right: 8px;
  color: ${vscForeground};
  line-height: 1.5;

  > *:last-child {
    margin-bottom: 0;
  }
`;

/**
 * Converts ANSI strings into JSON output.
 * @name ansiToJSON
 * @function
 * @param {String} input The input string.
 * @param {boolean} use_classes If `true`, HTML classes will be appended
 *                              to the HTML output.
 * @return {Array} The parsed input.
 */
function ansiToJSON(
  input: string,
  use_classes: boolean = false,
): AnserJsonEntry[] {
  input = escapeCarriageReturn(fixBackspace(input));
  return Anser.ansiToJson(input, {
    json: true,
    remove_empty: true,
    use_classes,
  });
}

/**
 * Create a class string.
 * @name createClass
 * @function
 * @param {AnserJsonEntry} bundle
 * @return {String} class name(s)
 */
function createClass(bundle: AnserJsonEntry): string | null {
  let classNames: string = "";

  if (bundle.bg) {
    classNames += `${bundle.bg}-bg `;
  }
  if (bundle.fg) {
    classNames += `${bundle.fg}-fg `;
  }
  if (bundle.decoration) {
    classNames += `ansi-${bundle.decoration} `;
  }

  if (classNames === "") {
    return null;
  }

  classNames = classNames.substring(0, classNames.length - 1);
  return classNames;
}
/**
 * Converts an Anser bundle into a React Node.
 * @param linkify whether links should be converted into clickable anchor tags.
 * @param useClasses whether to render the span with a class instead of an inline style.
 * @param bundle Anser output.
 * @param key
 */

function convertBundleIntoReact(
  linkify: boolean,
  useClasses: boolean,
  bundle: AnserJsonEntry,
  key: number,
): JSX.Element {
  const className = useClasses ? createClass(bundle) : null;
  // Convert bundle.decoration to string or undefined (not null) to match the prop type
  const decorationProp = bundle.decoration
    ? String(bundle.decoration)
    : undefined;

  if (!linkify) {
    return (
      <AnsiSpan
        key={key}
        className={className || undefined}
        bg={useClasses ? undefined : bundle.bg}
        fg={useClasses ? undefined : bundle.fg}
        decoration={decorationProp}
      >
        {bundle.content}
      </AnsiSpan>
    );
  }

  const content: React.ReactNode[] = [];
  const linkRegex =
    /(\s|^)(https?:\/\/(?:www\.|(?!www))[^\s.]+\.[^\s]{2,}|www\.[^\s]+\.[^\s]{2,})/g;

  let index = 0;
  let match: RegExpExecArray | null;
  while ((match = linkRegex.exec(bundle.content)) !== null) {
    const [, pre, url] = match;

    const startIndex = match.index + pre.length;
    if (startIndex > index) {
      content.push(bundle.content.substring(index, startIndex));
    }

    // Make sure the href we generate from the link is fully qualified. We assume http
    // if it starts with a www because many sites don't support https
    const href = url.startsWith("www.") ? `http://${url}` : url;

    content.push(
      <AnsiLink key={index} href={href} target="_blank">
        {url}
      </AnsiLink>,
    );

    index = linkRegex.lastIndex;
  }

  if (index < bundle.content.length) {
    content.push(bundle.content.substring(index));
  }

  return (
    <AnsiSpan
      key={key}
      className={className || undefined}
      bg={useClasses ? undefined : bundle.bg}
      fg={useClasses ? undefined : bundle.fg}
      decoration={decorationProp}
    >
      {content}
    </AnsiSpan>
  );
}

declare interface Props {
  children?: string;
  linkify?: boolean;
  className?: string;
  useClasses?: boolean;
}

export default function Ansi(props: Props): JSX.Element {
  const { className, useClasses, children, linkify } = props;

  // Create the ANSI content
  const ansiContent = ansiToJSON(children ?? "", useClasses ?? false).map(
    (bundle, i) =>
      convertBundleIntoReact(linkify ?? false, useClasses ?? false, bundle, i),
  );

  return (
    <StyledAnsi
      contentEditable="false"
      fontSize={getFontSize()}
      whiteSpace="pre-wrap"
      bgColor={vscBackground}
    >
      <pre>
        <code className={className}>{ansiContent}</code>
      </pre>
    </StyledAnsi>
  );
}

// This is copied from the Jupyter Classic source code
// notebook/static/base/js/utils.js to handle \b in a way
// that is **compatible with Jupyter classic**. One can
// argue that this behavior is questionable:
// https://stackoverflow.com/questions/55440152/multiple-b-doesnt-work-as-expected-in-jupyter#
function fixBackspace(txt: string) {
  let tmp = txt;
  do {
    txt = tmp;
    // Cancel out anything-but-newline followed by backspace
    tmp = txt.replace(/[^\n]\x08/gm, "");
  } while (tmp.length < txt.length);
  return txt;
}
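
// Usage sketch, not part of the diff: rendering colored terminal output with
// the Ansi component defined above. The string uses standard SGR escape
// codes; linkify turns bare URLs into clickable anchors.
//
//   <Ansi linkify>{"\u001b[32mPASS\u001b[0m see https://example.com for logs"}</Ansi>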

@@ -913,12 +913,68 @@ export const models: { [key: string]: ModelPackage } = {
    providerOptions: ["gemini"],
    isOpenSource: false,
  },
  commandR: {
    title: "Command R",
  c4aiAyaExpanse8B: {
    title: "C4AI Aya Expanse 8B",
    description:
      "Aya Expanse is a massively multilingual large language model excelling in enterprise-scale tasks.",
    params: {
      model: "c4ai-aya-expanse-8b",
      contextLength: 8_000,
      title: "C4AI Aya Expanse 8B",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  c4aiAyaExpanse32B: {
    title: "C4AI Aya Expanse 32B",
    description:
      "Aya Expanse is a massively multilingual large language model excelling in enterprise-scale tasks.",
    params: {
      model: "c4ai-aya-expanse-32b",
      contextLength: 128_000,
      title: "C4AI Aya Expanse 32B",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  c4aiAyaVision8B: {
    title: "C4AI Aya Vision 8B",
    description:
      "Aya Vision is a state-of-the-art multimodal and massively multilingual large language model excelling at critical benchmarks for language, text, and image capabilities.",
    params: {
      model: "c4ai-aya-vision-8b",
      contextLength: 16_000,
      title: "C4AI Aya Vision 8B",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  c4aiAyaVision32B: {
    title: "C4AI Aya Vision 32B",
    description:
      "Aya Vision is a state-of-the-art multimodal and massively multilingual large language model excelling at critical benchmarks for language, text, and image capabilities.",
    params: {
      model: "c4ai-aya-vision-32b",
      contextLength: 16_000,
      title: "C4AI Aya Vision 32B",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  commandR032024: {
    title: "Command R 03-2024",
    description:
      "Command R is a scalable generative model targeting RAG and Tool Use to enable production-scale AI for enterprise.",
    params: {
      model: "command-r",
      model: "command-r-03-2024",
      contextLength: 128_000,
      title: "Command R",
      apiKey: "",

@@ -927,12 +983,12 @@ export const models: { [key: string]: ModelPackage } = {
    icon: "cohere.png",
    isOpenSource: false,
  },
  commandRPlus: {
    title: "Command R+",
  commandRPlus042024: {
    title: "Command R+ 04-2024",
    description:
      "Command R+ is a state-of-the-art RAG-optimized model designed to tackle enterprise-grade workloads",
      "Command R+ is a state-of-the-art RAG-optimized model designed to tackle enterprise-grade workloads.",
    params: {
      model: "command-r-plus",
      model: "command-r-plus-04-2024",
      contextLength: 128_000,
      title: "Command R+",
      apiKey: "",

@@ -941,6 +997,76 @@ export const models: { [key: string]: ModelPackage } = {
    icon: "cohere.png",
    isOpenSource: false,
  },
  commandR082024: {
    title: "Command R 08-2024",
    description:
      "Command R is a scalable generative model targeting RAG and Tool Use to enable production-scale AI for enterprise.",
    params: {
      model: "command-r-08-2024",
      contextLength: 128_000,
      title: "Command R 08-2024",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  commandRPlus082024: {
    title: "Command R+ 08-2024",
    description:
      "Command R+ is a state-of-the-art RAG-optimized model designed to tackle enterprise-grade workloads.",
    params: {
      model: "command-r-plus-08-2024",
      contextLength: 128_000,
      title: "Command R+ 08-2024",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  commandR7B122024: {
    title: "Command R7B 12-2024",
    description:
      "The smallest model in our R series delivers top-tier speed, efficiency, and quality to build powerful AI applications on commodity GPUs and edge devices.",
    params: {
      model: "command-r7b-12-2024",
      contextLength: 128_000,
      title: "Command R7B 12-2024",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  commandR7BArabic022025: {
    title: "Command R7B Arabic 02-2025",
    description:
      "Our state-of-the-art lightweight multilingual AI model has been optimized for advanced Arabic language capabilities to support enterprises in the MENA region.",
    params: {
      model: "command-r7b-arabic-02-2025",
      contextLength: 128_000,
      title: "Command R7B Arabic 02-2025",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  commandA032025: {
    title: "Command A 03-2025",
    description:
      "Command A is Cohere’s most performant model to date, excelling at real world enterprise tasks including tool use, retrieval augmented generation (RAG), agents, and multilingual use cases.",
    params: {
      model: "command-a-03-2025",
      contextLength: 256_000,
      title: "Command A 03-2025",
      apiKey: "",
    },
    providerOptions: ["cohere"],
    icon: "cohere.png",
    isOpenSource: false,
  },
  gpt4turbo: {
    title: "GPT-4 Turbo",
    description:

@@ -349,7 +349,19 @@ Select the \`GPT-4o\` model below to complete your provider configuration, but n
    },
    ...completionParamsInputsConfigs,
  ],
  packages: [models.commandR, models.commandRPlus],
  packages: [
    models.commandA032025,
    models.commandR7BArabic022025,
    models.commandR7B122024,
    models.commandRPlus082024,
    models.commandR082024,
    models.commandRPlus042024,
    models.commandR032024,
    models.c4aiAyaVision32B,
    models.c4aiAyaVision8B,
    models.c4aiAyaExpanse32B,
    models.c4aiAyaExpanse8B,
  ],
  apiKeyUrl: "https://docs.cohere.com/v2/docs/rate-limits",
},
groq: {

@@ -1,11 +1,10 @@
import { AnyAction, ThunkDispatch } from "@reduxjs/toolkit";
import { ToolCallState } from "core";
import { Fragment } from "react";
import { useDispatch } from "react-redux";
import styled from "styled-components";
import { vscForeground } from "../../../components";
import Ansi from "../../../components/ansiTerminal/Ansi";
import StyledMarkdownPreview from "../../../components/StyledMarkdownPreview";
import { moveTerminalProcessToBackground } from "../../../redux/thunks/moveTerminalProcessToBackground";
import { useAppDispatch } from "../../../redux/hooks";
import { moveTerminalProcessToBackground } from "../../../redux/thunks/moveTerminalProcessToBackground";

interface RunTerminalCommandToolCallProps {
  command: string;

@@ -15,13 +14,12 @@ interface RunTerminalCommandToolCallProps {

const CommandStatus = styled.div`
  font-size: 12px;
  color: #666;
  color: var(--vscode-descriptionForeground, ${vscForeground}88);
  margin-top: 8px;
  padding-left: 8px;
  padding-right: 8px;
  padding-bottom: 8px;
  display: flex;
  align-items: center;
  padding-left: 8px;
  padding-right: 8px;
`;

const StatusIcon = styled.span<{

@@ -33,21 +31,32 @@ const StatusIcon = styled.span<{
  margin-right: 8px;
  background-color: ${(props) =>
    props.status === "running"
      ? "#4caf50"
      ? "var(--vscode-testing-runAction, #4caf50)"
      : props.status === "completed"
        ? "#4caf50"
        ? "var(--vscode-testing-iconPassed, #4caf50)"
        : props.status === "background"
          ? "#2196f3"
          : "#f44336"};
          ? "var(--vscode-statusBarItem-prominentBackground, #2196f3)"
          : "var(--vscode-testing-iconFailed, #f44336)"};
  ${(props) =>
    props.status === "running" ? "animation: pulse 1.5s infinite;" : ""}
`;

// Removed unused styled components
// Waiting message styled for consistency
const WaitingMessage = styled.div`
  padding: 8px;
  padding-left: 16px;
  padding-right: 16px;
  margin-top: 8px;
`;

// For consistency with the rest of the styled components
const AnsiWrapper = styled.div`
  margin-top: 8px;
`;

const BackgroundLink = styled.a`
  font-size: 12px;
  color: #0077cc;
  color: var(--vscode-textLink-foreground, #3794ff);
  margin-left: 12px;
  cursor: pointer;
  text-decoration: none;

@@ -57,6 +66,11 @@ const BackgroundLink = styled.a`
  }
`;

// Just spacing between terminal components and the next toolcall
const TerminalContainer = styled.div`
  margin-bottom: 16px;
`;

export function RunTerminalCommand(props: RunTerminalCommandToolCallProps) {
  const dispatch = useAppDispatch();

@@ -82,16 +96,24 @@ export function RunTerminalCommand(props: RunTerminalCommandToolCallProps) {
  }

  return (
    <Fragment>
    <TerminalContainer>
      {/* Command */}
      <StyledMarkdownPreview
        isRenderingInStepContainer
        source={`\`\`\`bash .sh\n$ ${props.command ?? ""}${
          hasOutput || isRunning
            ? `\n${terminalContent || "Waiting for output..."}`
            : ""
        }\n\`\`\``}
        source={`\`\`\`bash .sh\n$ ${props.command ?? ""}\n\`\`\``}
      />

      {/* Terminal output with ANSI support */}
      {isRunning && !hasOutput && (
        <WaitingMessage>Waiting for output...</WaitingMessage>
      )}
      {hasOutput && (
        <AnsiWrapper>
          <Ansi>{terminalContent}</Ansi>
        </AnsiWrapper>
      )}

      {/* Status information */}
      {(statusMessage || isRunning) && (
        <CommandStatus>
          <StatusIcon status={statusType} />

@@ -112,6 +134,6 @@ export function RunTerminalCommand(props: RunTerminalCommandToolCallProps) {
        )}
      </CommandStatus>
    )}
    </Fragment>
    </TerminalContainer>
  );
}

@@ -1,12 +1,12 @@
{
  "name": "@continuedev/config-yaml",
  "version": "1.0.88",
  "version": "1.0.93",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@continuedev/config-yaml",
      "version": "1.0.88",
      "version": "1.0.93",
      "license": "Apache-2.0",
      "dependencies": {
        "@continuedev/config-types": "^1.0.14",

@@ -1,10 +1,17 @@
{
  "name": "@continuedev/config-yaml",
  "version": "1.0.88",
  "version": "1.0.94",
  "description": "",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "type": "module",
  "exports": {
    ".": {
      "browser": "./dist/browser.js",
      "node": "./dist/index.js",
      "default": "./dist/index.js"
    }
  },
  "bin": {
    "@continuedev/config-yaml": "./dist/cli.js"
  },

@@ -119,7 +119,7 @@ describe("E2E Scenarios", () => {
      },
    );

    expect(unrolledConfig.rules?.[0]).toBeNull();
    expect(unrolledConfig.config?.rules?.[0]).toBeNull();
  });

  it("should correctly unroll assistant", async () => {

@@ -142,19 +142,21 @@ describe("E2E Scenarios", () => {
      },
    );

    // Test that packages were correctly unrolled and params replaced
    expect(unrolledConfig.models?.length).toBe(4);
    const config = unrolledConfig.config;

    const openAiModel = unrolledConfig.models?.[0]!;
    // Test that packages were correctly unrolled and params replaced
    expect(config?.models?.length).toBe(4);

    const openAiModel = config?.models?.[0]!;
    expect(openAiModel.apiKey).toBe("sk-123");

    const geminiModel = unrolledConfig.models?.[1]!;
    const geminiModel = config?.models?.[1]!;
    expect(geminiModel.provider).toBe("continue-proxy");
    expect(geminiModel.apiKey).toBeUndefined();
    const geminiSecretLocation = "organization:test-org/GEMINI_API_KEY";
    expect((geminiModel as any).apiKeyLocation).toBe(geminiSecretLocation);

    const anthropicModel = unrolledConfig.models?.[2]!;
    const anthropicModel = config?.models?.[2]!;
    expect(anthropicModel.provider).toBe("continue-proxy");
    expect(anthropicModel.apiKey).toBeUndefined();
    const anthropicSecretLocation =

@@ -163,15 +165,15 @@ describe("E2E Scenarios", () => {
      anthropicSecretLocation,
    );

    const proxyOllamaModel = unrolledConfig.models?.[3]!;
    const proxyOllamaModel = config?.models?.[3]!;
    expect(proxyOllamaModel.provider).toBe("ollama");
    expect(proxyOllamaModel.defaultCompletionOptions?.stream).toBe(false);

    expect(unrolledConfig.rules?.length).toBe(2);
    expect(unrolledConfig.docs?.[0]?.startUrl).toBe(
    expect(config?.rules?.length).toBe(2);
    expect(config?.docs?.[0]?.startUrl).toBe(
      "https://docs.python.org/release/3.13.1",
    );
    expect(unrolledConfig.docs?.[0]?.rootUrl).toBe(
    expect(config?.docs?.[0]?.rootUrl).toBe(
      "https://docs.python.org/release/3.13.1",
    );

@@ -242,16 +244,18 @@ describe("E2E Scenarios", () => {
      },
    );

    const config = unrolledConfig.config;

    // The original rules array should have two items
    expect(unrolledConfig.rules?.length).toBe(3); // Now 3 with the injected block
    expect(config?.rules?.length).toBe(3); // Now 3 with the injected block

    // Check the original doc is still there
    expect(unrolledConfig.docs?.[0]?.startUrl).toBe(
    expect(config?.docs?.[0]?.startUrl).toBe(
      "https://docs.python.org/release/3.13.1",
    );

    // Check the injected doc block was added
    expect(unrolledConfig.rules?.[2]).toBe("Be kind");
    expect(config?.rules?.[2]).toBe("Be kind");
  });

  it.skip("should prioritize org over user / package secrets", () => {});

@@ -0,0 +1,18 @@
// Browser-compatible exports (excludes RegistryClient which uses Node.js APIs)
export * from "./converter.js";
export * from "./interfaces/index.js";
export * from "./interfaces/SecretResult.js";
export * from "./interfaces/slugs.js";
export * from "./load/clientRender.js";
export * from "./load/getBlockType.js";
export * from "./load/merge.js";
export * from "./load/proxySecretResolution.js";
export * from "./load/typeGuards.js";
export * from "./load/unroll.js";
export * from "./markdown/index.js";
export * from "./modelName.js";
// Note: registryClient.js is excluded because it uses Node.js fs/path APIs
export * from "./schemas/data/index.js";
export * from "./schemas/index.js";
export * from "./schemas/models.js";
export * from "./validation.js";

@@ -1,16 +1,2 @@
export * from "./converter.js";
export * from "./interfaces/index.js";
export * from "./interfaces/SecretResult.js";
export * from "./interfaces/slugs.js";
export * from "./load/clientRender.js";
export * from "./load/getBlockType.js";
export * from "./load/merge.js";
export * from "./load/proxySecretResolution.js";
export * from "./load/typeGuards.js";
export * from "./load/unroll.js";
export * from "./modelName.js";
export * from "./browser.js";
export * from "./registryClient.js";
export * from "./schemas/data/index.js";
export * from "./schemas/index.js";
export * from "./schemas/models.js";
export * from "./validation.js";

@@ -25,6 +25,15 @@ interface FileIdentifier extends BasePackageIdentifier {

export type PackageIdentifier = FullSlugIdentifier | FileIdentifier;

export function packageIdentifierToDisplayName(id: PackageIdentifier): string {
  switch (id.uriType) {
    case "file":
      return id.filePath;
    case "slug":
      return id.fullSlug.packageSlug;
  }
}
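
// Example, illustrative only: display names for the two identifier shapes,
// assuming a fullSlug of the form { ownerSlug, packageSlug, versionSlug }.
//
//   packageIdentifierToDisplayName({ uriType: "file", filePath: "./rules.md" })
//     // -> "./rules.md"
//   packageIdentifierToDisplayName({
//     uriType: "slug",
//     fullSlug: { ownerSlug: "acme", packageSlug: "my-assistant", versionSlug: "latest" },
//   }) // -> "my-assistant"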

export function encodePackageIdentifier(identifier: PackageIdentifier): string {
  switch (identifier.uriType) {
    case "slug":
|
|
@ -9,6 +9,7 @@ import {
|
|||
FQSN,
|
||||
PackageIdentifier,
|
||||
} from "../interfaces/slugs.js";
|
||||
import { markdownToRule } from "../markdown/index.js";
|
||||
import {
|
||||
AssistantUnrolled,
|
||||
assistantUnrolledSchema,
|
||||
|
@ -195,7 +196,6 @@ async function extractRenderedSecretsMap(
|
|||
export interface BaseUnrollAssistantOptions {
|
||||
renderSecrets: boolean;
|
||||
injectBlocks?: PackageIdentifier[];
|
||||
asConfigResult?: true;
|
||||
}
|
||||
|
||||
export interface DoNotRenderSecretsUnrollAssistantOptions
|
||||
|

@@ -217,24 +217,11 @@ export type UnrollAssistantOptions =
  | DoNotRenderSecretsUnrollAssistantOptions
  | RenderSecretsUnrollAssistantOptions;

// Overload to satisfy existing consumers of unrollAssistant.
export async function unrollAssistant(
  id: PackageIdentifier,
  registry: Registry,
  options: UnrollAssistantOptions & { asConfigResult: true },
): Promise<ConfigResult<AssistantUnrolled>>;

export async function unrollAssistant(
  id: PackageIdentifier,
  registry: Registry,
  options: UnrollAssistantOptions,
): Promise<AssistantUnrolled>;

export async function unrollAssistant(
  id: PackageIdentifier,
  registry: Registry,
  options: UnrollAssistantOptions,
): Promise<AssistantUnrolled | ConfigResult<AssistantUnrolled>> {
): Promise<ConfigResult<AssistantUnrolled>> {
  // Request the content from the registry
  const rawContent = await registry.getContent(id);

@@ -265,24 +252,19 @@ export async function unrollAssistantFromContent(
  rawYaml: string,
  registry: Registry,
  options: UnrollAssistantOptions,
): Promise<AssistantUnrolled | ConfigResult<AssistantUnrolled>> {
): Promise<ConfigResult<AssistantUnrolled>> {
  // Parse string to Zod-validated YAML
  let parsedYaml = parseConfigYaml(rawYaml);
  let parsedYaml = parseMarkdownRuleOrConfigYaml(rawYaml, id);

  // Unroll blocks and convert their secrets to FQSNs
  const unrolledAssistant = await unrollBlocks(
    parsedYaml,
    registry,
    options.injectBlocks,
    options.asConfigResult ?? false,
  );
  const {
    config: unrolledAssistant,
    configLoadInterrupted,
    errors,
  } = await unrollBlocks(parsedYaml, registry, options.injectBlocks);

  // Back to a string so we can fill in template variables
  const rawUnrolledYaml = options.asConfigResult
    ? YAML.stringify(
        (unrolledAssistant as ConfigResult<AssistantUnrolled>).config,
      )
    : YAML.stringify(unrolledAssistant);
  const rawUnrolledYaml = YAML.stringify(unrolledAssistant);

  // Convert all of the template variables to FQSNs
  // Secrets from the block will have the assistant slug prepended to the FQSN

@@ -291,7 +273,11 @@ export async function unrollAssistantFromContent(
  });

  if (!options.renderSecrets) {
    return parseAssistantUnrolled(templatedYaml);
    return {
      config: parseAssistantUnrolled(templatedYaml),
      errors: [],
      configLoadInterrupted: false,
    };
  }

  // Render secret values/locations for client

@@ -312,25 +298,18 @@ export async function unrollAssistantFromContent(
    options.onPremProxyUrl,
  );

  if (options.asConfigResult) {
    return {
      config: finalConfig,
      errors: (unrolledAssistant as ConfigResult<AssistantUnrolled>).errors,
      configLoadInterrupted: (
        unrolledAssistant as ConfigResult<AssistantUnrolled>
      ).configLoadInterrupted,
    };
  }

  return finalConfig;
  return {
    config: finalConfig,
    errors,
    configLoadInterrupted,
  };
}

export async function unrollBlocks(
  assistant: ConfigYaml,
  registry: Registry,
  injectBlocks: PackageIdentifier[] | undefined,
  asConfigError: boolean,
): Promise<AssistantUnrolled | ConfigResult<AssistantUnrolled>> {
): Promise<ConfigResult<AssistantUnrolled>> {
  const errors: ConfigValidationError[] = [];

  const unrolledAssistant: AssistantUnrolled = {

@@ -432,7 +411,10 @@ export async function unrollBlocks(
  for (const injectBlock of injectBlocks ?? []) {
    try {
      const blockConfigYaml = await registry.getContent(injectBlock);
      const parsedBlock = parseConfigYaml(blockConfigYaml);
      const parsedBlock = parseMarkdownRuleOrConfigYaml(
        blockConfigYaml,
        injectBlock,
      );
      const blockType = getBlockType(parsedBlock);
      const resolvedBlock = await resolveBlock(
        injectBlock,

@@ -466,20 +448,16 @@ export async function unrollBlocks(
    }
  }

  if (asConfigError) {
    const configResult: ConfigResult<AssistantUnrolled> = {
      config: undefined,
      errors: undefined,
      configLoadInterrupted: false,
    };
    configResult.config = unrolledAssistant;
    if (errors.length > 0) {
      configResult.errors = errors;
    }
    return configResult;
  const configResult: ConfigResult<AssistantUnrolled> = {
    config: undefined,
    errors: undefined,
    configLoadInterrupted: false,
  };
  configResult.config = unrolledAssistant;
  if (errors.length > 0) {
    configResult.errors = errors;
  }

  return unrolledAssistant;
  return configResult;
}
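
A consumption sketch for the ConfigResult shape returned above (parsedYaml and registry stand in for values from the calling code):

  const { config, errors, configLoadInterrupted } = await unrollBlocks(
    parsedYaml,
    registry,
    undefined, // no extra blocks injected
  );
  if (configLoadInterrupted || !config) {
    throw new Error(`config load failed: ${JSON.stringify(errors ?? [])}`);
  }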

export async function resolveBlock(

@@ -503,7 +481,60 @@ export async function resolveBlock(
    secrets: extractFQSNMap(rawYaml, [id]),
  });

  const parsedYaml = parseBlock(templatedYaml);
  return parseMarkdownRuleOrAssistantUnrolled(templatedYaml, id);
}

function parseMarkdownRuleOrAssistantUnrolled(
  content: string,
  id: PackageIdentifier,
): AssistantUnrolled {
  // Try to parse as YAML first, then as markdown rule if that fails
  let parsedYaml: AssistantUnrolled;
  try {
    parsedYaml = parseBlock(content);
  } catch (yamlError) {
    // If YAML parsing fails, try parsing as markdown rule
    try {
      const rule = markdownToRule(content, id);
      // Convert the rule object to the expected format
      parsedYaml = {
        name: rule.name,
        version: "1.0.0",
        rules: [rule],
      };
    } catch (markdownError) {
      // If both fail, throw the original YAML error
      throw yamlError;
    }
  }

  return parsedYaml;
}

function parseMarkdownRuleOrConfigYaml(
  content: string,
  id: PackageIdentifier,
): ConfigYaml {
  // Try to parse as YAML first, then as markdown rule if that fails
  let parsedYaml: ConfigYaml;
  try {
    parsedYaml = parseConfigYaml(content);
  } catch (yamlError) {
    // If YAML parsing fails, try parsing as markdown rule
    try {
      const rule = markdownToRule(content, id);
      // Convert the rule object to the expected format
      parsedYaml = {
        name: rule.name,
        version: "1.0.0",
        rules: [rule],
      };
    } catch (markdownError) {
      // If both fail, throw the original YAML error
      throw yamlError;
    }
  }

  return parsedYaml;
}
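
The fallback behavior in miniature (inputs are illustrative; a YAML parse or validation error is what routes content down the markdown path):

  // Valid config YAML parses directly:
  parseMarkdownRuleOrConfigYaml("name: My Assistant\nversion: 1.0.0", id);

  // A markdown rule file fails YAML validation, so it is converted via
  // markdownToRule and wrapped as a single-rule config:
  parseMarkdownRuleOrConfigYaml(
    '---\nglobs: "*.ts"\n---\n\nAlways write tests.',
    id,
  );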

@@ -1,10 +1,16 @@
import { PackageIdentifier } from "../browser.js";
import {
  createMarkdownWithFrontmatter,
  createRuleFilePath,
  createRuleMarkdown,
  sanitizeRuleName,
} from "./createMarkdownRule";
import { parseMarkdownRule } from "./parseMarkdownRule";
} from "./createMarkdownRule.js";
import { markdownToRule } from "./markdownToRule.js";

// Mock package identifier for testing
const mockPackageId: PackageIdentifier = {
  uriType: "file",
  filePath: "/path/to/file",
};

describe("sanitizeRuleName", () => {
  it("should sanitize rule names for filenames", () => {

@@ -25,23 +31,6 @@ describe("sanitizeRuleName", () => {
  });
});

describe("createRuleFilePath", () => {
  it("should create correct rule file path", () => {
    const result = createRuleFilePath("/workspace", "My Test Rule");
    expect(result).toBe("/workspace/.continue/rules/my-test-rule.md");
  });

  it("should handle special characters in rule name", () => {
    const result = createRuleFilePath("/home/user", "Rule with @#$% chars");
    expect(result).toBe("/home/user/.continue/rules/rule-with-chars.md");
  });

  it("should handle edge case rule names", () => {
    const result = createRuleFilePath("/test", "  Multiple   Spaces  ");
    expect(result).toBe("/test/.continue/rules/multiple-spaces.md");
  });
});

describe("createMarkdownWithFrontmatter", () => {
  it("should create properly formatted markdown with frontmatter", () => {
    const frontmatter = {

@@ -92,10 +81,13 @@ Just markdown content.`;
      originalFrontmatter,
      originalMarkdown,
    );
    const parsed = parseMarkdownRule(created);
    const parsed = markdownToRule(created, mockPackageId);

    expect(parsed.frontmatter).toEqual(originalFrontmatter);
    expect(parsed.markdown).toBe(originalMarkdown);
    expect(parsed.name).toBe(originalFrontmatter.name);
    expect(parsed.description).toBe(originalFrontmatter.description);
    expect(parsed.globs).toEqual(originalFrontmatter.globs);
    expect(parsed.alwaysApply).toBe(originalFrontmatter.alwaysApply);
    expect(parsed.rule).toBe(originalMarkdown);
  });
});

@@ -107,23 +99,23 @@ describe("createRuleMarkdown", () => {
      alwaysApply: true,
    });

    const parsed = parseMarkdownRule(result);
    const parsed = markdownToRule(result, mockPackageId);

    expect(parsed.frontmatter.description).toBe("Test description");
    expect(parsed.frontmatter.globs).toEqual(["*.ts", "*.js"]);
    expect(parsed.frontmatter.alwaysApply).toBe(true);
    expect(parsed.markdown).toBe("# Test Rule\n\nThis is the rule content");
    expect(parsed.description).toBe("Test description");
    expect(parsed.globs).toEqual(["*.ts", "*.js"]);
    expect(parsed.alwaysApply).toBe(true);
    expect(parsed.rule).toBe("This is the rule content");
  });

  it("should create rule markdown with minimal options", () => {
    const result = createRuleMarkdown("Simple Rule", "Simple content");

    const parsed = parseMarkdownRule(result);
    const parsed = markdownToRule(result, mockPackageId);

    expect(parsed.frontmatter.description).toBeUndefined();
    expect(parsed.frontmatter.globs).toBeUndefined();
    expect(parsed.frontmatter.alwaysApply).toBeUndefined();
    expect(parsed.markdown).toBe("# Simple Rule\n\nSimple content");
    expect(parsed.description).toBeUndefined();
    expect(parsed.globs).toBeUndefined();
    expect(parsed.alwaysApply).toBeUndefined();
    expect(parsed.rule).toBe("Simple content");
  });

  it("should handle string globs", () => {

@@ -131,8 +123,8 @@ describe("createRuleMarkdown", () => {
      globs: "*.py",
    });

    const parsed = parseMarkdownRule(result);
    expect(parsed.frontmatter.globs).toBe("*.py");
    const parsed = markdownToRule(result, mockPackageId);
    expect(parsed.globs).toBe("*.py");
  });

  it("should trim description and globs", () => {

@@ -141,9 +133,9 @@ describe("createRuleMarkdown", () => {
      globs: " *.ts ",
    });

    const parsed = parseMarkdownRule(result);
    expect(parsed.frontmatter.description).toBe("spaced description");
    expect(parsed.frontmatter.globs).toBe("*.ts");
    const parsed = markdownToRule(result, mockPackageId);
    expect(parsed.description).toBe("spaced description");
    expect(parsed.globs).toBe("*.ts");
  });

  it("should handle alwaysApply false explicitly", () => {

@@ -151,7 +143,7 @@ describe("createRuleMarkdown", () => {
      alwaysApply: false,
    });

    const parsed = parseMarkdownRule(result);
    expect(parsed.frontmatter.alwaysApply).toBe(false);
    const parsed = markdownToRule(result, mockPackageId);
    expect(parsed.alwaysApply).toBe(false);
  });
});

@@ -1,6 +1,5 @@
import * as YAML from "yaml";
import { joinPathsToUri } from "../../util/uri";
import { RuleFrontmatter } from "./parseMarkdownRule";
import { RuleFrontmatter } from "./markdownToRule.js";

export const RULE_FILE_EXTENSION = "md";

@@ -15,22 +14,6 @@ export function sanitizeRuleName(name: string): string {
    .replace(/^-+|-+$/g, ""); // Remove leading/trailing dashes
}

/**
 * Creates the file path for a rule in the workspace .continue/rules directory
 */
export function createRuleFilePath(
  workspaceDir: string,
  ruleName: string,
): string {
  const safeRuleName = sanitizeRuleName(ruleName);
  return joinPathsToUri(
    workspaceDir,
    ".continue",
    "rules",
    `${safeRuleName}.${RULE_FILE_EXTENSION}`,
  );
}

/**
 * Creates markdown content with YAML frontmatter in the format expected by parseMarkdownRule
 */

@@ -69,7 +52,5 @@ export function createRuleMarkdown(
    frontmatter.alwaysApply = options.alwaysApply;
  }

  const markdownBody = `# ${name}\n\n${ruleContent}`;

  return createMarkdownWithFrontmatter(frontmatter, markdownBody);
  return createMarkdownWithFrontmatter(frontmatter, ruleContent);
}

@@ -0,0 +1,2 @@
export * from "./createMarkdownRule.js";
export * from "./markdownToRule.js";

@@ -0,0 +1,173 @@
import { PackageIdentifier } from "../browser.js";
import { markdownToRule } from "./markdownToRule.js";

describe("markdownToRule", () => {
  // Use a mock PackageIdentifier for testing
  const mockId: PackageIdentifier = {
    uriType: "file",
    filePath: "/path/to/file",
  };

  it("should convert markdown with frontmatter to a rule", () => {
    const content = `---
globs: "**/test/**/*.kt"
name: Custom Name
---

# Test Rule

This is a test rule.`;

    const result = markdownToRule(content, mockId);
    expect(result.rule).toBe("# Test Rule\n\nThis is a test rule.");
    expect(result.globs).toBe("**/test/**/*.kt");
    expect(result.name).toBe("Custom Name");
  });

  it("should correctly parse markdown with YAML frontmatter", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

# Test Rule

This is a test rule.`;

    const result = markdownToRule(content, mockId);
    expect(result.globs).toBe("**/test/**/*.kt");
    expect(result.rule).toBe("# Test Rule\n\nThis is a test rule.");
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

  it("should handle missing frontmatter", () => {
    const content = `# Test Rule

This is a test rule without frontmatter.`;

    const result = markdownToRule(content, mockId);
    expect(result.globs).toBeUndefined();
    expect(result.rule).toBe(content);
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

  it("should handle empty frontmatter", () => {
    const content = `---
---

# Test Rule

This is a test rule with empty frontmatter.`;

    const result = markdownToRule(content, mockId);
    expect(result.globs).toBeUndefined();
    expect(result.rule).toBe(
      "# Test Rule\n\nThis is a test rule with empty frontmatter.",
    );
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

  it("should handle frontmatter with whitespace", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

# Test Rule

This is a test rule.`;

    const result = markdownToRule(content, mockId);
    expect(result.globs).toBe("**/test/**/*.kt");
    expect(result.rule).toBe("# Test Rule\n\nThis is a test rule.");
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

  it("should handle Windows line endings (CRLF)", () => {
    // Using \r\n for CRLF line endings
    const content = `---\r
globs: "**/test/**/*.kt"\r
---\r
\r
# Test Rule\r
\r
This is a test rule.`;

    const result = markdownToRule(content, mockId);
    expect(result.globs).toBe("**/test/**/*.kt");
    // The result should be normalized to \n
    expect(result.rule).toBe("# Test Rule\n\nThis is a test rule.");
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

  it("should handle malformed frontmatter", () => {
    const content = `---
globs: - "**/test/**/*.kt"
invalid: yaml: content
---

# Test Rule

This is a test rule.`;

    // Should treat the whole content as markdown when the frontmatter is malformed
    const result = markdownToRule(content, mockId);
    expect(result.globs).toBeUndefined();
    expect(result.rule).toBe(content);
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

it("should use packageIdentifierToDisplayName if no heading or frontmatter name", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

# Test Rule Title

This is a test rule.`;

    const result = markdownToRule(content, mockId);
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

it("should use packageIdentifierToDisplayName if no heading or frontmatter name (no heading)", () => {
    const content = `---
globs: "**/test/**/*.kt"
---

This is a test rule without a heading.`;

    const result = markdownToRule(content, mockId);
    expect(result.name).toBe("/path/to/file"); // Should use packageIdentifierToDisplayName result
  });

  it("should include description from frontmatter", () => {
    const content = `---
globs: "**/test/**/*.kt"
name: Test Rule
description: This is a rule description from frontmatter
---

# Test Rule

This is the content of the rule.`;

    const result = markdownToRule(content, mockId);
    expect(result.description).toBe(
      "This is a rule description from frontmatter",
    );
  });

  it("should include alwaysApply from frontmatter", () => {
    const content = `---
globs: "**/test/**/*.kt"
name: Test Rule
alwaysApply: false
---

# Test Rule

This is a rule with alwaysApply explicitly set to false.`;

    const result = markdownToRule(content, mockId);
    expect(result.alwaysApply).toBe(false);
  });
});

@@ -1,12 +1,15 @@
import { basename } from "path";
import * as YAML from "yaml";
import { RuleWithSource } from "../..";
import {
  PackageIdentifier,
  packageIdentifierToDisplayName,
} from "../browser.js";
import { RuleObject } from "../schemas/index.js";

export interface RuleFrontmatter {
  globs?: RuleWithSource["globs"];
  name?: RuleWithSource["name"];
  description?: RuleWithSource["description"];
  alwaysApply?: RuleWithSource["alwaysApply"];
  globs?: RuleObject["globs"];
  name?: RuleObject["name"];
  description?: RuleObject["description"];
  alwaysApply?: RuleObject["alwaysApply"];
}

/**

@@ -43,35 +46,17 @@ export function parseMarkdownRule(content: string): {
  return { frontmatter: {}, markdown: normalizedContent };
}

/**
 * Converts a markdown file with YAML frontmatter to a RuleWithSource object
 */
export function convertMarkdownRuleToContinueRule(
  path: string,
  content: string,
): RuleWithSource {
  const { frontmatter, markdown } = parseMarkdownRule(content);

  // Try to extract title from first heading if no name in frontmatter
  let name = frontmatter.name;
  if (!name) {
    // Look for a heading in the markdown
    const headingMatch = markdown.match(/^#\s+(.+)$/m);
    if (headingMatch) {
      name = headingMatch[1].trim();
    } else {
      // Fall back to filename
      name = basename(path).replace(/\.md$/, "");
    }
  }
export function markdownToRule(
  rule: string,
  id: PackageIdentifier,
): RuleObject {
  const { frontmatter, markdown } = parseMarkdownRule(rule);

  return {
    name,
    name: frontmatter.name ?? packageIdentifierToDisplayName(id),
    rule: markdown,
    globs: frontmatter.globs,
    description: frontmatter.description,
    alwaysApply: frontmatter.alwaysApply,
    source: "rules-block",
    ruleFile: path,
  };
}

@@ -39,6 +39,7 @@ const docSchema = z.object({
});

export type DocsConfig = z.infer<typeof docSchema>;

const ruleObjectSchema = z.object({
  name: z.string(),
  rule: z.string(),

@@ -48,8 +49,23 @@
});
const ruleSchema = z.union([z.string(), ruleObjectSchema]);

/**
 * A schema for rules.json files
 */
export const rulesJsonSchema = z.object({
  name: z.string(),
  version: z.string(),
  author: z.string().optional(),
  license: z.string().optional(),
  rules: z.record(z.string(), z.string()).optional(),
});

export type Rule = z.infer<typeof ruleSchema>;
export type RuleObject = z.infer<typeof ruleObjectSchema>;
/**
 * A schema for rules.json files
 */
export type RulesJson = z.infer<typeof rulesJsonSchema>;
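
A document that would satisfy rulesJsonSchema (values are illustrative; the rules record maps rule names to rule text):

  const rulesJson: RulesJson = rulesJsonSchema.parse({
    name: "my-rules",
    version: "1.0.0",
    author: "example-author",
    rules: {
      "no-console": "Do not leave console.log calls in committed code.",
    },
  });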

const defaultUsesSchema = z.string();

(File diff suppressed because it is too large)

@@ -3,29 +3,167 @@ import { ModelProvider } from "../types.js";
export const Cohere: ModelProvider = {
  models: [
    {
      model: "command-r-plus",
      displayName: "Command R+",
      contextLength: 128000,
      maxCompletionTokens: 4000,
      // recommendedFor: ["chat"],
      model: "command-a-03-2025",
      displayName: "Command A 03-2025",
      contextLength: 256000,
      maxCompletionTokens: 8192,
      description:
        "Command A is Cohere’s most performant model to date, excelling at real world enterprise tasks including tool use, retrieval augmented generation (RAG), agents, and multilingual use cases.",
      recommendedFor: ["chat"],
    },
    {
      model: "command-r",
      displayName: "Command R",
      model: "command-r7b-arabic-12-2024",
      displayName: "Command R7B Arabic 02-2025",
      contextLength: 128000,
      maxCompletionTokens: 4096,
      description:
        "Our state-of-the-art lightweight multilingual AI model has been optimized for advanced Arabic language capabilities to support enterprises in the MENA region.",
      recommendedFor: ["chat"],
    },
    {
      model: "command-r7b-12-2024",
      displayName: "Command R7B 12-2024",
      contextLength: 128000,
      maxCompletionTokens: 4096,
      description:
        "The smallest model in our R series delivers top-tier speed, efficiency, and quality to build powerful AI applications on commodity GPUs and edge devices.",
      recommendedFor: ["chat"],
    },
    {
      model: "command-r-plus-08-2024",
      displayName: "Command R+ 08-2024",
      contextLength: 128000,
      maxCompletionTokens: 4096,
      description:
        "Command R+ is a state-of-the-art RAG-optimized model designed to tackle enterprise-grade workloads.",
      recommendedFor: ["chat"],
    },
    {
      model: "command-r-08-2024",
      displayName: "Command R 08-2024",
      contextLength: 128000,
      maxCompletionTokens: 4096,
      description:
        "Command R is a scalable generative model targeting RAG and Tool Use to enable production-scale AI for enterprise.",
      recommendedFor: ["chat"],
    },
    {
      model: "command-r-plus-04-2024",
      displayName: "Command R+ 04-2024",
      contextLength: 128000,
      maxCompletionTokens: 4096,
      description:
        "Command R+ is a state-of-the-art RAG-optimized model designed to tackle enterprise-grade workloads.",
      recommendedFor: ["chat"],
    },
    {
      model: "command-r-03-2024",
      displayName: "Command R 03-2024",
      contextLength: 128000,
      maxCompletionTokens: 4096,
      description:
        "Command R is a scalable generative model targeting RAG and Tool Use to enable production-scale AI for enterprise.",
      recommendedFor: ["chat"],
    },
    {
      model: "c4ai-aya-vision-32b",
      displayName: "C4AI Aya Vision 32B",
      contextLength: 16000,
      maxCompletionTokens: 4096,
      description:
        "Aya Vision is a state-of-the-art multimodal and massively multilingual large language model excelling at critical benchmarks for language, text, and image capabilities.",
      recommendedFor: ["chat"],
    },
    {
      model: "c4ai-aya-vision-8b",
      displayName: "C4AI Aya Vision 8B",
      contextLength: 16000,
      maxCompletionTokens: 4096,
      description:
        "Aya Vision is a state-of-the-art multimodal and massively multilingual large language model excelling at critical benchmarks for language, text, and image capabilities.",
      recommendedFor: ["chat"],
    },
    {
      model: "c4ai-aya-expanse-32b",
      displayName: "C4AI Aya Expanse 32B",
      contextLength: 128000,
      maxCompletionTokens: 4096,
      description:
        "Aya Expanse is a massively multilingual large language model excelling in enterprise-scale tasks.",
      recommendedFor: ["chat"],
    },
    {
      model: "c4ai-aya-expanse-8b",
      displayName: "C4AI Aya Expanse 8B",
      contextLength: 8000,
      maxCompletionTokens: 4096,
      description:
        "Aya Expanse is a massively multilingual large language model excelling in enterprise-scale tasks.",
      recommendedFor: ["chat"],
    },
    {
      model: "embed-v4.0",
      displayName: "Embed v4.0",
      description:
        "A model that allows for text and images to be classified or turned into embeddings.",
      recommendedFor: ["embed"],
      contextLength: 128000,
      maxCompletionTokens: 4000,
    },
    {
      model: "embed-english-v3.0",
      displayName: "Embed English 3.0",
      // recommendedFor: ["embed"],
      displayName: "Embed English v3.0",
      description:
        "A model that allows for text to be classified or turned into embeddings. English only.",
      recommendedFor: ["embed"],
      contextLength: 512,
    },
    {
      model: "embed-english-light-v3.0",
      displayName: "Embed English Light v3.0",
      description:
        "A smaller, faster version of embed-english-v3.0. Almost as capable, but a lot faster. English only.",
      recommendedFor: ["embed"],
      contextLength: 512,
    },
    {
      model: "embed-multilingual-v3.0",
      displayName: "Embed Multilingual v3.0",
      description:
        "Provides multilingual classification and embedding support.",
      recommendedFor: ["embed"],
      contextLength: 512,
    },
    {
      model: "embed-multilingual-light-v3.0",
      displayName: "Embed Multilingual Light v3.0",
      description:
        "A smaller, faster version of embed-multilingual-v3.0. Almost as capable, but a lot faster.",
      recommendedFor: ["embed"],
      contextLength: 512,
    },
    {
      model: "rerank-v3.5",
      displayName: "Rerank v3.5",
      description:
        "A model for documents and semi-structured data (JSON). State-of-the-art performance in English and non-English languages.",
      recommendedFor: ["rerank"],
      contextLength: 4096,
    },
    {
      model: "rerank-english-v3.0",
      displayName: "Rerank English 3.0",
      // recommendedFor: ["rerank"],
      contextLength: 4000,
      displayName: "Rerank English v3.0",
      description:
        "A model that allows for re-ranking English Language documents and semi-structured data (JSON).",
      recommendedFor: ["rerank"],
      contextLength: 4096,
    },
    {
      model: "rerank-multilingual-v3.0",
      displayName: "Rerank Multilingual v3.0",
      description:
        "A model for documents and semi-structured data (JSON) that are not in English.",
      recommendedFor: ["rerank"],
      contextLength: 4096,
    },
  ],
  id: "cohere",
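
A filtering sketch over the provider data above (field names as shown in the entries; the sample output assumes only the visible models):

  const embedModels = Cohere.models
    .filter((m) => m.recommendedFor?.includes("embed"))
    .map((m) => m.model);
  // e.g. ["embed-v4.0", "embed-english-v3.0", "embed-english-light-v3.0", ...]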

@@ -85,7 +85,7 @@ const TESTS: Omit<ModelConfig & { options?: TestConfigOptions }, "name">[] = [
  },
  // {
  //   provider: "cohere",
  //   model: "embed-english-v3.0",
  //   model: "embed-v4.0",
  //   apiKey: process.env.COHERE_API_KEY!,
  //   roles: ["embed"],
  // },

@@ -103,7 +103,7 @@ const TESTS: Omit<ModelConfig & { options?: TestConfigOptions }, "name">[] = [
  },
  // {
  //   provider: "cohere",
  //   model: "rerank-english-v3.0",
  //   model: "rerank-v3.5",
  //   apiKey: process.env.COHERE_API_KEY!,
  //   roles: ["rerank"],
  // },