Merge remote-tracking branch 'upstream/preview' into AddDocIndexingMaxDepth
commit 78ef735634
@@ -49,10 +49,13 @@
       "args": [
         // Pass a directory to manually test in
         "${workspaceFolder}/extensions/vscode",
         "${workspaceFolder}/extensions/vscode/manual-testing-sandbox/example.ts",
         "${workspaceFolder}/extensions/vscode/manual-testing-sandbox/test.js",
         "--extensionDevelopmentPath=${workspaceFolder}/extensions/vscode"
       ],
-      "outFiles": ["${workspaceFolder}/extensions/vscode/out/**/*.js"],
+      "outFiles": [
+        "${workspaceFolder}/extensions/vscode/out/**/*.js",
+        "/Users/natesesti/.continue/config.ts"
+      ],
       "preLaunchTask": "vscode-extension:build"
     },
     // Has to be run after starting the server (separately or using the compound configuration)
@@ -58,15 +58,14 @@ Continue is continuously improving, but a feature isn't complete until it is reflected in the documentation!

 #### Pre-requisites

-- You should have Node.js version 20.11.0 (LTS) or higher installed. You can get it on [nodejs.org](https://nodejs.org/en/download) or, if you are using NVM (Node Version Manager), you can set the correct version of Node.js for this project by running the following command in the root of the project:
-- Continue uses `pnpm` to manage node_modules. You can install `pnpm` globally with `npm install -g pnpm`, or another method described in [their docs](https://pnpm.io/installation#using-npm).
-
-#### VS Code
-
+You should have Node.js version 20.11.0 (LTS) or higher installed. You can get it on [nodejs.org](https://nodejs.org/en/download) or, if you are using NVM (Node Version Manager), you can set the correct version of Node.js for this project by running the following command in the root of the project:
+
+```bash
+nvm use
+```
+
 #### VS Code

 1. Clone and open in VS Code the Continue repo `https://github.com/continuedev/continue`

 2. Open the VS Code command palette (`cmd/ctrl+shift+p`) and select `Tasks: Run Task` and then select `install-all-dependencies`

@@ -80,7 +79,7 @@ nvm use
 1. The new VS Code window with the extension is referred to as the _Host VS Code_
 2. The window you started debugging from is referred to as the _Main VS Code_

-4. To package the extension, run `pnpm package` in the `extensions/vscode` directory. This will generate `extensions/vscode/build/continue-patch.vsix`, which you can install by right-clicking and selecting "Install Extension VSIX".
+4. To package the extension, run `npm package` in the `extensions/vscode` directory. This will generate `extensions/vscode/build/continue-patch.vsix`, which you can install by right-clicking and selecting "Install Extension VSIX".

 > Note: Breakpoints can be used in both the `core` and `extensions/vscode` folders while debugging, but are not currently supported inside of `gui` code. Hot-reloading is enabled with Vite, so if you make any changes to the `gui`, they should be automatically reflected without rebuilding. Similarly, any changes to `core` or `extensions/vscode` will be automatically included by just reloading the _Host VS Code_ window with cmd/ctrl+shift+p "Reload Window".
@@ -15,14 +15,17 @@ function execCmdSync(cmd) {
 }

 const esbuildOutputFile = "out/index.js";
-const platforms = ["darwin", "linux", "win32"];
-const architectures = ["x64", "arm64"];
-let targets = platforms.flatMap((platform) =>
-  architectures.map((arch) => `${platform}-${arch}`),
-);
+const targets = [
+  "darwin-x64",
+  "darwin-arm64",
+  "linux-x64",
+  "linux-arm64",
+  "win32-x64",
+];

 const assetBackups = [
   "node_modules/win-ca/lib/crypt32-ia32.node.bak",
-  "node_modules/win-ca/lib/crypt32-x64.node.bak"
+  "node_modules/win-ca/lib/crypt32-x64.node.bak",
 ];

 let esbuildOnly = false;

@@ -91,7 +94,7 @@ async function installNodeModuleInTempDirAndCopyToCurrent(package, toCopy) {
     });
   } finally {
     // Clean up the temporary directory
-    rimrafSync(tempDir);
+    // rimrafSync(tempDir);

     // Return to the original directory
     process.chdir(currentDir);

@@ -161,7 +164,7 @@ async function installNodeModuleInTempDirAndCopyToCurrent(package, toCopy) {
   // delete asset backups generated by previous pkg invocations, if present
   for (const assetPath of assetBackups) {
-    fs.rmSync(assetPath, {force: true});
+    fs.rmSync(assetPath, { force: true });
   }

   console.log("[info] Building with esbuild...");
File diff suppressed because it is too large
@@ -47,7 +47,7 @@
     "posthog-node": "^3.6.3",
     "system-ca": "^1.0.2",
     "uuid": "^9.0.1",
-    "vectordb": "^0.4.10",
+    "vectordb": "^0.4.20",
     "win-ca": "^3.5.1"
   }
 }
@@ -12,6 +12,30 @@ ollama run starcoder:3b

Once it has been downloaded, you should begin to see completions in VS Code.

## Setting up with LM Studio

You can also set up tab-autocomplete with a local LM Studio instance by following these steps:

1. Download the latest version of LM Studio from [here](https://lmstudio.ai/)
2. Download a model (e.g. search for `second-state/StarCoder2-3B-GGUF` and choose one of the options there)
3. Go to the server section (button is on the left), select your model from the dropdown at the top, and click "Start Server"
4. Go to the "My Models" section (button is on the left), find your selected model, and copy the path (example: `second-state/StarCoder2-3B-GGUF/starcoder2-3b-Q8_0.gguf`); this will be used as the "model" attribute in Continue
5. Go to Continue and modify the configurations for a [custom model](#setting-up-a-custom-model)
6. Set the "provider" to `lmstudio` and the "model" to the path copied earlier

Example:

```json title=~/.continue/config.json
{
  "tabAutocompleteModel": {
    "title": "Starcoder2 3b",
    "model": "second-state/StarCoder2-3B-GGUF/starcoder2-3b-Q8_0.gguf",
    "provider": "lmstudio"
  },
  ...
}
```

## Setting up a custom model

All of the configuration options available for chat models are available to use for tab-autocomplete. For example, if you wanted to use a remote vLLM instance you would edit your `config.json` like this (note that it is not inside the models array), filling in the correct model name and vLLM endpoint:
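The hunk ends before the example itself. As a minimal sketch (not part of the diff), assuming vLLM's OpenAI-compatible server and placeholder model/endpoint values, the block might look like:

```json title=~/.continue/config.json
{
  "tabAutocompleteModel": {
    "title": "Tab Autocomplete Model",
    "provider": "openai",
    "model": "<MODEL_NAME>",
    "apiBase": "<VLLM_ENDPOINT>/v1"
  }
}
```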
@@ -1,65 +1,83 @@
-import { CustomCommand, SlashCommand, SlashCommandDescription } from "../index.js";
+import {
+  CustomCommand,
+  SlashCommand,
+  SlashCommandDescription,
+} from "../index.js";
 import { stripImages } from "../llm/countTokens.js";
 import { renderTemplatedString } from "../llm/llms/index.js";
 import SlashCommands from "./slash/index.js";

 export function slashFromCustomCommand(
   customCommand: CustomCommand,
 ): SlashCommand {
   return {
     name: customCommand.name,
     description: customCommand.description,
     run: async function* ({ input, llm, history, ide }) {
       // Remove slash command prefix from input
       let userInput = input;
       if (userInput.startsWith(`/${customCommand.name}`)) {
         userInput = userInput
           .slice(customCommand.name.length + 1, userInput.length)
           .trimStart();
       }

       // Render prompt template
       const promptUserInput = await renderTemplatedString(
         customCommand.prompt,
         ide.readFile.bind(ide),
         { input: userInput },
       );

       const messages = [...history];
       // Find the last chat message with this slash command and replace it with the user input
       for (let i = messages.length - 1; i >= 0; i--) {
-        const {role, content} = messages[i];
+        const { role, content } = messages[i];
         if (role !== "user") {
           continue;
         }

-        if (Array.isArray(content) && content.some((part) => part.text?.startsWith(`/${customCommand.name}`))) {
-          messages[i] = { ...messages[i], content: content.map((part) => {
-            return part.text?.startsWith(`/${customCommand.name}`) ? {...part, text: promptUserInput } : part;
-          }) };
-          break;
-        } else if (typeof content === "string" && content.startsWith(`/${customCommand.name}`)) {
-          messages[i] = { ...messages[i], content: promptUserInput };
-          break;
-        }
-      }
+        if (
+          Array.isArray(content) &&
+          content.some((part) =>
+            part.text?.startsWith(`/${customCommand.name}`),
+          )
+        ) {
+          messages[i] = {
+            ...messages[i],
+            content: content.map((part) => {
+              return part.text?.startsWith(`/${customCommand.name}`)
+                ? { ...part, text: promptUserInput }
+                : part;
+            }),
+          };
+          break;
+        } else if (
+          typeof content === "string" &&
+          content.startsWith(`/${customCommand.name}`)
+        ) {
+          messages[i] = { ...messages[i], content: promptUserInput };
+          break;
+        }
+      }

       for await (const chunk of llm.streamChat(messages)) {
         yield stripImages(chunk.content);
       }
     },
   };
 }

 export function slashCommandFromDescription(
   desc: SlashCommandDescription,
 ): SlashCommand | undefined {
   const cmd = SlashCommands.find((cmd) => cmd.name === desc.name);
   if (!cmd) {
     return undefined;
   }
   return {
     ...cmd,
     params: desc.params,
+    description: desc.description ?? cmd.description,
   };
 }
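For reference, `slashFromCustomCommand` consumes `customCommands` entries from `config.json`. A minimal sketch (hypothetical command name and prompt text), matching the name/prompt/description shape used above:

```json
{
  "customCommands": [
    {
      "name": "check",
      "prompt": "{{{ input }}}\n\nCheck the selected code for mistakes.",
      "description": "Check the selected code"
    }
  ]
}
```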
@@ -6,9 +6,9 @@ import {
 export const defaultConfig: SerializedContinueConfig = {
   models: [
     {
-      title: "GPT-4-Turbo (Free Trial)",
+      title: "GPT-4o (Free Trial)",
       provider: "free-trial",
-      model: "gpt-4-turbo",
+      model: "gpt-4o",
       systemMessage:
         "You are an expert software developer. You give helpful and concise responses.",
     },

@@ -19,13 +19,6 @@ export const defaultConfig: SerializedContinueConfig = {
       systemMessage:
         "You are an expert software developer. You give helpful and concise responses. Whenever you write a code block you include the language after the opening ticks.",
     },
-    {
-      title: "GPT-4o (Free Trial)",
-      provider: "free-trial",
-      model: "gpt-4o",
-      systemMessage:
-        "You are an expert software developer. You give helpful and concise responses.",
-    },
     {
       title: "Claude 3 Sonnet (Free Trial)",
       provider: "free-trial",

@@ -50,9 +43,9 @@ export const defaultConfig: SerializedContinueConfig = {
 export const defaultConfigJetBrains: SerializedContinueConfig = {
   models: [
     {
-      title: "GPT-4-Turbo (Free Trial)",
+      title: "GPT-4o (Free Trial)",
       provider: "free-trial",
-      model: "gpt-4-turbo",
+      model: "gpt-4o",
       systemMessage:
         "You are an expert software developer. You give helpful and concise responses.",
     },

@@ -63,13 +56,6 @@ export const defaultConfigJetBrains: SerializedContinueConfig = {
       systemMessage:
         "You are an expert software developer. You give helpful and concise responses. Whenever you write a code block you include the language after the opening ticks.",
     },
-    {
-      title: "GPT-4o (Free Trial)",
-      provider: "free-trial",
-      model: "gpt-4o",
-      systemMessage:
-        "You are an expert software developer. You give helpful and concise responses.",
-    },
     {
       title: "Claude 3 Sonnet (Free Trial)",
       provider: "free-trial",

@@ -129,7 +115,6 @@ export const defaultContextProvidersVsCode: ContextProviderWithParams[] = [
   { name: "code", params: {} },
-  { name: "docs", params: {} },
   { name: "diff", params: {} },
   { name: "open", params: {} },
   { name: "terminal", params: {} },
   { name: "problems", params: {} },
   { name: "folder", params: {} },

@@ -137,5 +122,7 @@ export const defaultContextProvidersVsCode: ContextProviderWithParams[] = [
 ];

 export const defaultContextProvidersJetBrains: ContextProviderWithParams[] = [
   { name: "open", params: {} },
+  { name: "diff", params: {} },
+  { name: "folder", params: {} },
   { name: "codebase", params: {} },
 ];
@@ -64,7 +64,7 @@ export class ConfigHandler {
     const uniqueId = await this.ide.getUniqueId();

     this.savedConfig = await loadFullConfigNode(
-      this.ide.readFile.bind(this.ide),
+      this.ide,
       workspaceConfigs,
       this.ideSettings,
       ideInfo.ideType,
@@ -19,6 +19,7 @@ import {
   CustomLLM,
   EmbeddingsProviderDescription,
   IContextProvider,
+  IDE,
   IdeType,
   ModelDescription,
   Reranker,

@@ -49,6 +50,7 @@ import {
   defaultSlashCommandsJetBrains,
   defaultSlashCommandsVscode,
 } from "./default.js";
+import { getPromptFiles, slashCommandFromPromptFile } from "./promptFile.js";
 const { execSync } = require("child_process");

 function resolveSerializedConfig(filepath: string): SerializedContinueConfig {

@@ -130,9 +132,10 @@ function loadSerializedConfig(
   return config;
 }

-function serializedToIntermediateConfig(
+async function serializedToIntermediateConfig(
   initial: SerializedContinueConfig,
-): Config {
+  ide: IDE,
+): Promise<Config> {
   const slashCommands: SlashCommand[] = [];
   for (const command of initial.slashCommands || []) {
     const newCommand = slashCommandFromDescription(command);

@@ -144,6 +147,20 @@ function serializedToIntermediateConfig(
     slashCommands.push(slashFromCustomCommand(command));
   }

+  const workspaceDirs = await ide.getWorkspaceDirs();
+  const promptFiles = (
+    await Promise.all(
+      workspaceDirs.map((dir) =>
+        getPromptFiles(ide, path.join(dir, ".prompts")),
+      ),
+    )
+  )
+    .flat()
+    .filter(({ path }) => path.endsWith(".prompt"));
+  for (const file of promptFiles) {
+    slashCommands.push(slashCommandFromPromptFile(file.path, file.content));
+  }

   const config: Config = {
     ...initial,
     slashCommands,

@@ -168,7 +185,7 @@ function isContextProviderWithParams(
 /** Only difference between intermediate and final configs is the `models` array */
 async function intermediateToFinalConfig(
   config: Config,
-  readFile: (filepath: string) => Promise<string>,
+  ide: IDE,
   ideSettings: IdeSettings,
   uniqueId: string,
   writeLog: (log: string) => Promise<void>,

@@ -179,7 +196,7 @@ async function intermediateToFinalConfig(
     if (isModelDescription(desc)) {
       const llm = await llmFromDescription(
         desc,
-        readFile,
+        ide.readFile.bind(ide),
         uniqueId,
         ideSettings,
         writeLog,

@@ -201,7 +218,7 @@ async function intermediateToFinalConfig(
             model: modelName,
             title: llm.title + " - " + modelName,
           },
-          readFile,
+          ide.readFile.bind(ide),
           uniqueId,
           ideSettings,
           writeLog,

@@ -261,7 +278,7 @@ async function intermediateToFinalConfig(
     if (isModelDescription(config.tabAutocompleteModel)) {
       autocompleteLlm = await llmFromDescription(
         config.tabAutocompleteModel,
-        readFile,
+        ide.readFile.bind(ide),
         uniqueId,
         ideSettings,
         writeLog,

@@ -451,7 +468,7 @@ async function buildConfigTs() {
 }

 async function loadFullConfigNode(
-  readFile: (filepath: string) => Promise<string>,
+  ide: IDE,
   workspaceConfigs: ContinueRcJson[],
   ideSettings: IdeSettings,
   ideType: IdeType,

@@ -459,7 +476,7 @@ async function loadFullConfigNode(
   writeLog: (log: string) => Promise<void>,
 ): Promise<ContinueConfig> {
   let serialized = loadSerializedConfig(workspaceConfigs, ideSettings, ideType);
-  let intermediate = serializedToIntermediateConfig(serialized);
+  let intermediate = await serializedToIntermediateConfig(serialized, ide);

   const configJsContents = await buildConfigTs();
   if (configJsContents) {

@@ -496,7 +513,7 @@ async function loadFullConfigNode(
   const finalConfig = await intermediateToFinalConfig(
     intermediate,
-    readFile,
+    ide,
     ideSettings,
     uniqueId,
     writeLog,
@@ -7,9 +7,9 @@ export function setupOptimizedMode(
     ...config,
     models: [
       {
-        title: "GPT-4-Turbo (Free Trial)",
+        title: "GPT-4o (Free Trial)",
         provider: "free-trial",
-        model: "gpt-4-turbo",
+        model: "gpt-4o",
         systemMessage:
           "You are an expert software developer. You give helpful and concise responses.",
       },

@@ -20,13 +20,6 @@ export function setupOptimizedMode(
         systemMessage:
           "You are an expert software developer. You give helpful and concise responses. Whenever you write a code block you include the language after the opening ticks.",
       },
-      {
-        title: "GPT-4o (Free Trial)",
-        provider: "free-trial",
-        model: "gpt-4o",
-        systemMessage:
-          "You are an expert software developer. You give helpful and concise responses.",
-      },
       {
         title: "Claude 3 Sonnet (Free Trial)",
         provider: "free-trial",

@@ -82,6 +75,7 @@ export function setupLocalMode(
         provider: "ollama",
         model: "AUTODETECT",
       },
+      ...config.models.filter((model) => model.provider !== "free-trial"),
     ],
     tabAutocompleteModel: {
       title: "Starcoder 3b",

@@ -94,3 +88,24 @@ export function setupLocalMode(
     reranker: undefined,
   };
 }
+
+export function setupLocalAfterFreeTrial(
+  config: SerializedContinueConfig,
+): SerializedContinueConfig {
+  return {
+    ...config,
+    models: [
+      {
+        title: "Llama 3",
+        provider: "ollama",
+        model: "llama3",
+      },
+      {
+        title: "Ollama",
+        provider: "ollama",
+        model: "AUTODETECT",
+      },
+      ...config.models.filter((model) => model.provider !== "free-trial"),
+    ],
+  };
+}
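In effect, `setupLocalAfterFreeTrial` prepends the two Ollama entries and keeps any non-free-trial models the user already had. A minimal sketch of the resulting `models` array for a config that previously contained only free-trial models (illustrative, not from the diff):

```json
{
  "models": [
    { "title": "Llama 3", "provider": "ollama", "model": "llama3" },
    { "title": "Ollama", "provider": "ollama", "model": "AUTODETECT" }
  ]
}
```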
@@ -0,0 +1,108 @@
import * as YAML from "yaml";
import { IDE, SlashCommand } from "..";
import { stripImages } from "../llm/countTokens";
import { renderTemplatedString } from "../llm/llms";
import { getBasename } from "../util";

export async function getPromptFiles(
  ide: IDE,
  dir: string,
): Promise<{ path: string; content: string }[]> {
  try {
    const paths = await ide.listWorkspaceContents(dir);
    const results = paths.map(async (path) => {
      const content = await ide.readFile(path);
      return { path, content };
    });
    return Promise.all(results);
  } catch (e) {
    console.error(e);
    return [];
  }
}

export function slashCommandFromPromptFile(
  path: string,
  content: string,
): SlashCommand {
  let [preambleRaw, prompt] = content.split("\n---\n");
  if (prompt === undefined) {
    prompt = preambleRaw;
    preambleRaw = "";
  }

  const preamble = YAML.parse(preambleRaw) ?? {};
  const name = preamble.name ?? getBasename(path).split(".prompt")[0];
  const description = preamble.description ?? name;

  let systemMessage: string | undefined = undefined;
  if (prompt.includes("<system>")) {
    systemMessage = prompt.split("<system>")[1].split("</system>")[0].trim();
    prompt = prompt.split("</system>")[1].trim();
  }

  return {
    name,
    description,
    run: async function* ({ input, llm, history, ide }) {
      // Remove slash command prefix from input
      let userInput = input;
      if (userInput.startsWith(`/${name}`)) {
        userInput = userInput
          .slice(name.length + 1, userInput.length)
          .trimStart();
      }

      // Render prompt template
      const diff = await ide.getDiff();
      const promptUserInput = await renderTemplatedString(
        prompt,
        ide.readFile.bind(ide),
        { input: userInput, diff },
      );

      const messages = [...history];
      // Find the last chat message with this slash command and replace it with the user input
      for (let i = messages.length - 1; i >= 0; i--) {
        const { role, content } = messages[i];
        if (role !== "user") {
          continue;
        }

        if (
          Array.isArray(content) &&
          content.some((part) => part.text?.startsWith(`/${name}`))
        ) {
          messages[i] = {
            ...messages[i],
            content: content.map((part) => {
              return part.text?.startsWith(`/${name}`)
                ? { ...part, text: promptUserInput }
                : part;
            }),
          };
          break;
        } else if (
          typeof content === "string" &&
          content.startsWith(`/${name}`)
        ) {
          messages[i] = { ...messages[i], content: promptUserInput };
          break;
        }
      }

      // System message
      if (systemMessage) {
        if (messages[0]?.role === "system") {
          messages[0].content = systemMessage;
        } else {
          messages.unshift({ role: "system", content: systemMessage });
        }
      }

      for await (const chunk of llm.streamChat(messages)) {
        yield stripImages(chunk.content);
      }
    },
  };
}
@@ -32,13 +32,18 @@ export function addModel(model: ModelDescription) {

 export function addOpenAIKey(key: string) {
   editConfigJson((config) => {
-    config.models = config.models.map((m: ModelDescription) => {
-      if (m.provider === "free-trial") {
-        m.apiKey = key;
-        m.provider = "openai";
-      }
-      return m;
-    });
+    config.models = config.models
+      .filter(
+        (model) =>
+          model.provider !== "free-trial" || model.model.startsWith("gpt"),
+      )
+      .map((m: ModelDescription) => {
+        if (m.provider === "free-trial") {
+          m.apiKey = key;
+          m.provider = "openai";
+        }
+        return m;
+      });
     return config;
   });
 }
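So `addOpenAIKey` now drops free-trial models that an OpenAI key cannot serve and converts the remaining ones in place. A minimal sketch of the effect (hypothetical models array and key, not from the diff):

```typescript
// Before addOpenAIKey("sk-..."):
const before = [
  { provider: "free-trial", model: "gpt-4o" },
  { provider: "free-trial", model: "claude-3-sonnet-20240229" },
];
// Non-GPT free-trial entries are filtered out; GPT entries switch provider and gain the key:
const after = [{ provider: "openai", model: "gpt-4o", apiKey: "sk-..." }];
```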
@@ -114,7 +114,7 @@ class DocsContextProvider extends BaseContextProvider {
         return 1;
       } else {
         // Secondary criterion: Alphabetical order when both items are in the same category
-          return a.title.toString().localeCompare(b.title.toString());
+        return a.title.toString().localeCompare(b.title.toString());
       }
     });
@@ -1,6 +1,6 @@
 import fetch from "node-fetch";
-import { Chunk, Reranker } from "../../index.js";
 import { getHeaders } from "../../continueServer/stubs/headers.js";
+import { Chunk, Reranker } from "../../index.js";
 import { SERVER_URL } from "../../util/parameters.js";

 export class FreeTrialReranker implements Reranker {

@@ -11,7 +11,7 @@ export class FreeTrialReranker {
       method: "POST",
       headers: {
         "Content-Type": "application/json",
-        ...getHeaders(),
+        ...(await getHeaders()),
       },
       body: JSON.stringify({
         query,
@@ -1,3 +1,3 @@
-export function getHeaders() {
+export async function getHeaders() {
   return {};
 }
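Because `getHeaders` is now async, call sites elsewhere in this diff can no longer spread its result directly; spreading the Promise itself would add no headers at all. A minimal sketch of the required pattern (illustrative import path):

```typescript
import { getHeaders } from "./continueServer/stubs/headers.js"; // path is illustrative

async function buildHeaders(): Promise<Record<string, string>> {
  return {
    "Content-Type": "application/json",
    ...(await getHeaders()), // was ...getHeaders(), silently wrong once the function is async
  };
}
```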
@@ -577,6 +577,7 @@ export type ModelName =
   // Gemini
   | "gemini-pro"
   | "gemini-1.5-pro-latest"
+  | "gemini-1.5-flash-latest"
   // Mistral
   | "mistral-tiny"
   | "mistral-small"
@@ -321,6 +321,7 @@ export class LanceDbIndex implements CodebaseIndex {
     const tableName = this.tableNameForTag(tag);
     const tableNames = await db.tableNames();
     if (!tableNames.includes(tableName)) {
       console.warn("Table not found in LanceDB", tableName);
       return [];
     }
@@ -227,6 +227,16 @@ const configs: SiteIndexingConfig[] = [
     startUrl: "https://doc.qt.io/qtforpython-6/quickstart.html",
     rootUrl: "https://doc.qt.io/qtforpython-6/api.html",
   },
+  {
+    title: "Bootstrap",
+    startUrl: "https://getbootstrap.com/docs/5.3/getting-started/introduction/",
+    rootUrl: "https://getbootstrap.com/docs/5.3/",
+  },
+  {
+    title: "Alpine.js",
+    startUrl: "https://alpinejs.dev/start-here",
+    rootUrl: "https://alpinejs.dev/",
+  },
 ];

 export default configs;
@@ -1,6 +1,6 @@
 import { Response } from "node-fetch";
-import { EmbedOptions } from "../../index.js";
 import { getHeaders } from "../../continueServer/stubs/headers.js";
+import { EmbedOptions } from "../../index.js";
 import { SERVER_URL } from "../../util/parameters.js";
 import { withExponentialBackoff } from "../../util/withExponentialBackoff.js";
 import BaseEmbeddingsProvider from "./BaseEmbeddingsProvider.js";

@@ -30,7 +30,7 @@ class FreeTrialEmbeddingsProvider extends BaseEmbeddingsProvider {
     await Promise.all(
       batchedChunks.map(async (batch) => {
         const fetchWithBackoff = () =>
-          withExponentialBackoff<Response>(() =>
+          withExponentialBackoff<Response>(async () =>
             this.fetch(new URL("embeddings", SERVER_URL), {
               method: "POST",
               body: JSON.stringify({

@@ -39,7 +39,7 @@ class FreeTrialEmbeddingsProvider extends BaseEmbeddingsProvider {
               }),
               headers: {
                 "Content-Type": "application/json",
-                ...getHeaders(),
+                ...(await getHeaders()),
               },
             }),
           );
@@ -1,3 +1,4 @@
+// prettier-ignore
 // @ts-ignore
 import { PipelineType, env, pipeline } from "../../vendor/modules/@xenova/transformers/src/transformers.js";

@@ -6,10 +7,7 @@ import BaseEmbeddingsProvider from "./BaseEmbeddingsProvider.js";

 env.allowLocalModels = true;
 env.allowRemoteModels = false;
-if (typeof window === "undefined") {
-  // The embeddings provider should just never be called in the browser
-  env.localModelPath = path.join(__dirname, "..", "models");
-}
+env.localModelPath = path.join(__dirname, "..", "models");

 class EmbeddingsPipeline {
   static task: PipelineType = "feature-extraction";

@@ -31,7 +29,7 @@ export class TransformersJsEmbeddingsProvider extends BaseEmbeddingsProvider {
   constructor(modelPath?: string) {
     super({ model: "all-MiniLM-L2-v6" }, () => Promise.resolve(null));
     if (modelPath) {
-      env.localModelPath = modelPath;
+      // env.localModelPath = modelPath;
     }
   }
@@ -62,6 +62,7 @@ const MODEL_SUPPORTS_IMAGES: string[] = [
   "claude-3",
   "gemini-ultra",
   "gemini-1.5-pro",
+  "gemini-1.5-flash",
   "sonnet",
   "opus",
   "haiku",
@@ -57,7 +57,7 @@ function countTokens(
         : encoding.encode(part.text ?? "", "all", []).length;
     }, 0);
   } else {
-    return encoding.encode(content, "all", []).length;
+    return encoding.encode(content ?? "", "all", []).length;
   }
 }
@@ -217,14 +217,16 @@ ${settings}
 ${prompt}`;
   }

-  private _logTokensGenerated(model: string, completion: string) {
-    let tokens = this.countTokens(completion);
+  private _logTokensGenerated(model: string, prompt: string, completion: string) {
+    let promptTokens = this.countTokens(prompt);
+    let generatedTokens = this.countTokens(completion);
     Telemetry.capture("tokens_generated", {
       model: model,
       provider: this.providerName,
-      tokens: tokens,
+      promptTokens: promptTokens,
+      generatedTokens: generatedTokens,
     });
-    DevDataSqliteDb.logTokensGenerated(model, this.providerName, tokens);
+    DevDataSqliteDb.logTokensGenerated(model, this.providerName, promptTokens, generatedTokens);
   }

   fetch(url: RequestInfo | URL, init?: RequestInit): Promise<Response> {

@@ -335,7 +337,7 @@ ${prompt}`;
       yield chunk;
     }

-    this._logTokensGenerated(completionOptions.model, completion);
+    this._logTokensGenerated(completionOptions.model, prompt, completion);

     if (log && this.writeLog) {
       await this.writeLog(`Completion:\n\n${completion}\n\n`);

@@ -370,7 +372,7 @@ ${prompt}`;
     const completion = await this._complete(prompt, completionOptions);

-    this._logTokensGenerated(completionOptions.model, completion);
+    this._logTokensGenerated(completionOptions.model, prompt, completion);
     if (log && this.writeLog) {
       await this.writeLog(`Completion:\n\n${completion}\n\n`);
     }

@@ -432,7 +434,7 @@ ${prompt}`;
       throw error;
     }

-    this._logTokensGenerated(completionOptions.model, completion);
+    this._logTokensGenerated(completionOptions.model, prompt, completion);
     if (log && this.writeLog) {
       await this.writeLog(`Completion:\n\n${completion}\n\n`);
     }

@@ -531,3 +533,4 @@ ${prompt}`;
     }
   }
+}
@@ -1,8 +1,11 @@
+import {
+  BedrockRuntimeClient,
+  InvokeModelWithResponseStreamCommand,
+} from "@aws-sdk/client-bedrock-runtime";
 import * as fs from "fs";
 import os from "os";
 import { join as joinPath } from "path";
 import { promisify } from "util";
-import { BaseLLM } from "../index.js";
 import {
   ChatMessage,
   CompletionOptions,

@@ -10,13 +13,14 @@ import {
   ModelProvider,
 } from "../../index.js";
 import { stripImages } from "../countTokens.js";
+import { BaseLLM } from "../index.js";

 const aws4 = require("aws4");
 const readFile = promisify(fs.readFile);

 namespace BedrockCommon {
   export enum Method {
-    Chat = "invoke",
+    Chat = "invoke-with-response-stream",
     Completion = "invoke-with-response-stream",
   }
   export const Service: string = "bedrock";

@@ -126,10 +130,7 @@ class Bedrock extends BaseLLM {
     const path = `/model/${model}/${apiMethod}`;
     const opts = {
       headers: {
-        accept:
-          apiMethod === BedrockCommon.Method.Chat
-            ? "application/json"
-            : "application/vnd.amazon.eventstream",
+        accept: "application/vnd.amazon.eventstream",
         "content-type": "application/json",
         "x-amzn-bedrock-accept": "*/*",
       },

@@ -147,10 +148,11 @@ class Bedrock extends BaseLLM {
         joinPath(process.env.HOME ?? os.homedir(), ".aws", "credentials"),
         "utf8",
       );
-      const credentials = this._parseCredentialsFile(data);
-      accessKeyId = credentials.bedrock.accessKeyId;
-      secretAccessKey = credentials.bedrock.secretAccessKey;
-      sessionToken = credentials.bedrock.sessionToken || "";
+      const credentialsFile = this._parseCredentialsFile(data);
+      const credentials = credentialsFile.bedrock ?? credentialsFile.default;
+      accessKeyId = credentials.accessKeyId;
+      secretAccessKey = credentials.secretAccessKey;
+      sessionToken = credentials.sessionToken || "";
     } catch (err) {
       console.error("Error reading AWS credentials", err);
       return new Response("403");

@@ -172,39 +174,56 @@ class Bedrock extends BaseLLM {
     for await (const update of this._streamChat(messages, options)) {
       yield stripImages(update.content);
     }
-    // TODO: Couldn't seem to get this stream API working yet. Deferring to _streamChat.
-    // import { streamSse } from "../stream";
-    // const response = await this._fetchWithAwsAuthSigV4(BedrockCommon.Method.Completion, JSON.stringify({
-    //     ...this._convertArgs(options),
-    //     max_tokens: undefined, // Delete this key in favor of the correct one for the Completions API.
-    //     max_tokens_to_sample: options.maxTokens,
-    //     prompt: `\n\nHuman: ${prompt}\n\nAssistant:`,
-    //   })
-    // );
-    // for await (const value of streamSse(response)) {
-    //   if (value.completion) {
-    //     yield value.completion
-    //   }
-    // }
   }

   protected async *_streamChat(
     messages: ChatMessage[],
     options: CompletionOptions,
   ): AsyncGenerator<ChatMessage> {
-    const response = await this._fetchWithAwsAuthSigV4(
-      BedrockCommon.Method.Chat,
-      JSON.stringify({
-        ...this._convertArgs(options),
-        messages: this._convertMessages(messages),
-        anthropic_version: "bedrock-2023-05-31", // Fixed, required parameter for Chat API.
-      }),
-      this._convertModelName(options.model),
-    );
-    yield {
-      role: "assistant",
-      content: (await response.json()).content[0].text,
-    };
+    const data = await readFile(
+      joinPath(process.env.HOME ?? os.homedir(), ".aws", "credentials"),
+      "utf8",
+    );
+    const credentialsFile = this._parseCredentialsFile(data);
+    const credentials = credentialsFile.bedrock ?? credentialsFile.default;
+    const accessKeyId = credentials.accessKeyId;
+    const secretAccessKey = credentials.secretAccessKey;
+    const sessionToken = credentials.sessionToken || "";
+    const client = new BedrockRuntimeClient({
+      region: this.region,
+      credentials: {
+        accessKeyId: accessKeyId,
+        secretAccessKey: secretAccessKey,
+        sessionToken: sessionToken,
+      },
+    });
+    const command = new InvokeModelWithResponseStreamCommand({
+      body: new TextEncoder().encode(
+        JSON.stringify({
+          anthropic_version: "bedrock-2023-05-31",
+          max_tokens: options.maxTokens,
+          system: this.systemMessage,
+          messages: this._convertMessages(messages),
+          temperature: options.temperature,
+          top_p: options.topP,
+          top_k: options.topK,
+          stop_sequences: options.stop,
+        }),
+      ),
+      contentType: "application/json",
+      modelId: options.model,
+    });
+    const response = await client.send(command);
+    if (response.body) {
+      for await (const value of response.body) {
+        const binaryChunk = value.chunk?.bytes;
+        const textChunk = new TextDecoder().decode(binaryChunk);
+        const chunk = JSON.parse(textChunk).delta?.text;
+        if (chunk) {
+          yield { role: "assistant", content: chunk };
+        }
+      }
+    }
   }
 }
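The new `_streamChat` decodes each response-stream event by hand. A minimal sketch of just that step (assuming the Anthropic Messages streaming payload shape used above, where `delta.text` carries the next tokens):

```typescript
// Each event carries raw bytes; the JSON payload's delta.text holds the streamed text, if any.
function decodeBedrockChunk(bytes?: Uint8Array): string | undefined {
  if (!bytes) {
    return undefined;
  }
  const payload = JSON.parse(new TextDecoder().decode(bytes));
  return payload.delta?.text;
}
```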
@@ -1,20 +1,38 @@
 import { getHeaders } from "../../continueServer/stubs/headers.js";
 import { ChatMessage, CompletionOptions, ModelProvider } from "../../index.js";
 import { SERVER_URL } from "../../util/parameters.js";
 import { Telemetry } from "../../util/posthog.js";
 import { BaseLLM } from "../index.js";
 import { streamResponse } from "../stream.js";

 class FreeTrial extends BaseLLM {
   static providerName: ModelProvider = "free-trial";

-  private _getHeaders() {
+  private async _getHeaders() {
     return {
       uniqueId: this.uniqueId || "None",
       extensionVersion: Telemetry.extensionVersion ?? "Unknown",
       os: Telemetry.os ?? "Unknown",
       "Content-Type": "application/json",
-      ...getHeaders(),
+      ...(await getHeaders()),
     };
   }

+  private async _countTokens(prompt: string, model: string, isPrompt: boolean) {
+    if (!Telemetry.client) {
+      throw new Error(
+        'In order to use the free trial, telemetry must be enabled so that we can monitor abuse. To enable telemetry, set "allowAnonymousTelemetry": true in config.json and make sure the box is checked in IDE settings. If you use your own model (local or API key), telemetry will never be required.',
+      );
+    }
+    const event = isPrompt
+      ? "free_trial_prompt_tokens"
+      : "free_trial_completion_tokens";
+    Telemetry.capture(event, {
+      tokens: this.countTokens(prompt),
+      model,
+    });
+  }

   private _convertArgs(options: CompletionOptions): any {
     return {
       model: options.model,

@@ -36,18 +54,23 @@ class FreeTrial extends BaseLLM {
   ): AsyncGenerator<string> {
     const args = this._convertArgs(this.collectArgs(options));

+    await this._countTokens(prompt, args.model, true);
+
     const response = await this.fetch(`${SERVER_URL}/stream_complete`, {
       method: "POST",
-      headers: this._getHeaders(),
+      headers: await this._getHeaders(),
       body: JSON.stringify({
         prompt,
         ...args,
       }),
     });

+    let completion = "";
     for await (const value of streamResponse(response)) {
       yield value;
+      completion += value;
     }
+    this._countTokens(completion, args.model, false);
   }

   protected _convertMessage(message: ChatMessage) {

@@ -74,21 +97,30 @@ class FreeTrial extends BaseLLM {
   ): AsyncGenerator<ChatMessage> {
     const args = this._convertArgs(this.collectArgs(options));

+    await this._countTokens(
+      messages.map((m) => m.content).join("\n"),
+      args.model,
+      true,
+    );
+
     const response = await this.fetch(`${SERVER_URL}/stream_chat`, {
       method: "POST",
-      headers: this._getHeaders(),
+      headers: await this._getHeaders(),
       body: JSON.stringify({
         messages: messages.map(this._convertMessage),
         ...args,
       }),
     });

+    let completion = "";
     for await (const chunk of streamResponse(response)) {
       yield {
         role: "assistant",
         content: chunk,
       };
+      completion += chunk;
     }
+    this._countTokens(completion, args.model, false);
   }

   async listModels(): Promise<string[]> {

@@ -96,9 +128,7 @@ class FreeTrial extends BaseLLM {
       "llama3-70b",
       "gpt-3.5-turbo",
       "gpt-4o",
-      "gpt-4-turbo",
       "gemini-1.5-pro-latest",
       "claude-3-opus-20240229",
       "claude-3-sonnet-20240229",
       "claude-3-haiku-20240307",
     ];
File diff suppressed because it is too large
@@ -22,11 +22,12 @@
     "eslint-plugin-import": "^2.29.1",
     "eslint-plugin-require-extensions": "^0.1.3",
     "jest": "^29.7.0",
-    "onnxruntime-common": "^1.17.3",
-    "onnxruntime-web": "^1.17.3",
+    "onnxruntime-common": "1.14.0",
+    "onnxruntime-web": "1.14.0",
     "ts-jest": "^29.1.1"
   },
   "dependencies": {
+    "@aws-sdk/client-bedrock-runtime": "^3.574.0",
     "@mozilla/readability": "^0.5.0",
     "@octokit/rest": "^20.0.2",
     "@types/jsdom": "^21.1.6",

@@ -34,7 +35,8 @@
     "@types/node-fetch": "^2.6.11",
     "@typescript-eslint/eslint-plugin": "^7.8.0",
     "@typescript-eslint/parser": "^7.8.0",
-    "@xenova/transformers": "^2.14.0",
+    "@xenova/transformers": "2.14.0",
+    "onnxruntime-node": "1.14.0",
     "adf-to-md": "^1.1.0",
     "axios": "^1.6.7",
     "cheerio": "^1.0.0-rc.12",

@@ -49,11 +51,11 @@
     "ignore": "^5.3.1",
     "js-tiktoken": "^1.0.8",
     "jsdom": "^24.0.0",
-    "launchdarkly-node-client-sdk": "^3.2.0",
     "llm-code-highlighter": "^0.0.14",
     "node-fetch": "^3.3.2",
     "node-html-markdown": "^1.3.0",
     "ollama": "^0.4.6",
-    "onnxruntime-node": "1.17.3-rev.1",
     "openai": "^4.20.1",
     "pg": "^8.11.3",
     "posthog-node": "^3.6.3",

@@ -64,8 +66,9 @@
     "sqlite3": "^5.1.7",
     "tree-sitter-wasms": "^0.1.11",
     "uuid": "^9.0.1",
-    "vectordb": "^0.4.12",
-    "web-tree-sitter": "^0.21.0"
+    "vectordb": "^0.4.20",
+    "web-tree-sitter": "^0.21.0",
+    "yaml": "^2.4.2"
   },
   "puppeteer": {
     "chromium_revision": "119.0.6045.105"
@@ -12,41 +12,54 @@ export class DevDataSqliteDb {
         model TEXT NOT NULL,
         provider TEXT NOT NULL,
         tokens_generated INTEGER NOT NULL,
+        tokens_prompt INTEGER NOT NULL DEFAULT 0,
         timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
       )`,
     );

+    // Add tokens_prompt column if it doesn't exist
+    const columnCheckResult = await db.all(
+      `PRAGMA table_info(tokens_generated);`,
+    );
+    const columnExists = columnCheckResult.some(
+      (col: any) => col.name === "tokens_prompt",
+    );
+    if (!columnExists) {
+      await db.exec(
+        `ALTER TABLE tokens_generated ADD COLUMN tokens_prompt INTEGER NOT NULL DEFAULT 0;`,
+      );
+    }
   }

   public static async logTokensGenerated(
     model: string,
     provider: string,
-    tokens: number,
+    promptTokens: number,
+    generatedTokens: number,
   ) {
     const db = await DevDataSqliteDb.get();
     await db?.run(
-      "INSERT INTO tokens_generated (model, provider, tokens_generated) VALUES (?, ?, ?)",
-      [model, provider, tokens],
+      `INSERT INTO tokens_generated (model, provider, tokens_prompt, tokens_generated) VALUES (?, ?, ?, ?)`,
+      [model, provider, promptTokens, generatedTokens],
     );
   }

   public static async getTokensPerDay() {
     const db = await DevDataSqliteDb.get();
-    // Return a sum of tokens_generated column aggregated by day
+    // Return a sum of tokens_generated and tokens_prompt columns aggregated by day
     const result = await db?.all(
-      `SELECT date(timestamp) as day, sum(tokens_generated) as tokens
+      `SELECT date(timestamp) as day, sum(tokens_prompt) as promptTokens, sum(tokens_generated) as generatedTokens
       FROM tokens_generated
       GROUP BY date(timestamp)`,
       // WHERE model = ? AND provider = ?
       // [model, provider],
     );
     return result ?? [];
   }

   public static async getTokensPerModel() {
     const db = await DevDataSqliteDb.get();
-    // Return a sum of tokens_generated column aggregated by model
+    // Return a sum of tokens_generated and tokens_prompt columns aggregated by model
     const result = await db?.all(
-      `SELECT model, sum(tokens_generated) as tokens
+      `SELECT model, sum(tokens_prompt) as promptTokens, sum(tokens_generated) as generatedTokens
       FROM tokens_generated
       GROUP BY model`,
     );
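The PRAGMA-then-ALTER pattern above makes the schema migration idempotent, so it can run on every startup. A generic sketch of the same idea (hypothetical helper, not part of the diff):

```typescript
// Check PRAGMA table_info for the column, then ALTER TABLE only when it is missing.
async function addColumnIfMissing(
  db: { all: (sql: string) => Promise<any[]>; exec: (sql: string) => Promise<void> },
  table: string,
  column: string,
  definition: string,
): Promise<void> {
  const columns = await db.all(`PRAGMA table_info(${table});`);
  if (!columns.some((col: any) => col.name === column)) {
    await db.exec(`ALTER TABLE ${table} ADD COLUMN ${column} ${definition};`);
  }
}
```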
@@ -59,10 +59,14 @@ export function fetchwithRequestOptions(
       : new HttpProxyAgent(proxy, agentOptions)
     : new protocol.Agent(agentOptions);

-  const headers: { [key: string]: string } = requestOptions?.headers || {};
+  let headers: { [key: string]: string } = {};
+  for (const [key, value] of Object.entries(init?.headers || {})) {
+    headers[key] = value as string;
+  }
+  headers = {
+    ...headers,
+    ...requestOptions?.headers,
+  };

   // Replace localhost with 127.0.0.1
   if (url.hostname === "localhost") {
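The merge order matters here: headers from the fetch call's `init` are copied first, and `requestOptions.headers` from user config override them on key conflicts. A minimal sketch of the effect (hypothetical header values):

```typescript
const initHeaders = { Authorization: "Bearer per-request", "X-Trace": "1" };
const configHeaders = { Authorization: "Bearer from-config" };
// Later spread wins on conflicts:
const merged = { ...initHeaders, ...configHeaders };
// merged: { Authorization: "Bearer from-config", "X-Trace": "1" }
```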
@@ -1,3 +1,4 @@
+import { RangeInFileWithContents } from "../commands/util.js";
 import {
   ContextItemWithId,
   ContextSubmenuItem,

@@ -10,7 +11,6 @@ import {
   Thread,
   SiteIndexingConfig
 } from "../index.js";
-import { RangeInFileWithContents } from "../commands/util.js";

 import { Protocol } from "../protocol.js";

@@ -87,8 +87,14 @@ export type WebviewProtocol = Protocol &
   reloadWindow: [undefined, void];
   focusEditor: [undefined, void];
   toggleFullScreen: [undefined, void];
-  "stats/getTokensPerDay": [undefined, { day: string; tokens: number }[]];
-  "stats/getTokensPerModel": [undefined, { model: string; tokens: number }[]];
+  "stats/getTokensPerDay": [
+    undefined,
+    { day: string; promptTokens: number; generatedTokens: number }[],
+  ];
+  "stats/getTokensPerModel": [
+    undefined,
+    { model: string; promptTokens: number; generatedTokens: number }[],
+  ];
   insertAtCursor: [{ text: string }, void];
   copyText: [{ text: string }, void];
   "jetbrains/editorInsetHeight": [{ height: number }, void];

@@ -99,7 +105,8 @@ export type WebviewProtocol = Protocol &
         | "optimized"
         | "custom"
         | "localExistingUser"
-        | "optimizedExistingUser";
+        | "optimizedExistingUser"
+        | "localAfterFreeTrial";
     },
     void,
   ];

@@ -136,4 +143,6 @@ export type ReverseWebviewProtocol = {
   setTheme: [{ theme: any }, void];
   setColors: [{ [key: string]: string }, void];
   "jetbrains/editorInsetRefresh": [undefined, void];
+  addApiKey: [undefined, void];
+  setupLocalModel: [undefined, void];
 };
core/yarn.lock (5963 changed lines)
File diff suppressed because it is too large
@@ -167,14 +167,13 @@ If you select some code to be edited, you can have the context provider filter out the rest

### Jira Issues

-Type '@jira' to reference the conversation in a Jira issue. Make sure to include your own [Atlassian API Token](https://id.atlassian.com/manage-profile/security/api-tokens).
+Type '@jira' to reference the conversation in a Jira issue. Make sure to include your own [Atlassian API Token](https://id.atlassian.com/manage-profile/security/api-tokens), or use your `email` and `token`, with the token set to your password, for basic authentication. If you use your own Atlassian API Token, do not configure your email.

```json
{
  "name": "jira",
  "params": {
    "domain": "company.atlassian.net",
    "email": "someone@somewhere.com",
    "token": "ATATT..."
  }
}
```
@@ -58,9 +58,9 @@ Groq provides lightning fast inference for open-source LLMs like Llama3, up to t
{
  "models": [
    {
-      "title": "GPT-4-Turbo",
+      "title": "GPT-4o",
      "provider": "openai",
-      "model": "gpt-4-turbo",
+      "model": "gpt-4o",
      "apiKey": "YOUR_API_KEY"
    }
  ]
@@ -0,0 +1,40 @@
# OpenRouter

OpenRouter is a unified interface for commercial and open-source models, giving you access to the best models at the best prices. You can sign up [here](https://openrouter.ai/signup), create your API key on the [keys page](https://openrouter.ai/keys), and then choose a model from the [list of supported models](https://openrouter.ai/models).

Change `~/.continue/config.json` to look like the following. Since OpenRouter is fully API-compatible with OpenAI, it is recommended to stick with `"provider": "openai"`, even though OpenRouter isn't necessarily the upstream provider.

```json title="~/.continue/config.json"
{
  "models": [
    {
      "title": "OpenRouter LLaMA 3 70B",
      "provider": "openai",
      "model": "meta-llama/llama-3-70b-instruct",
      "apiBase": "https://openrouter.ai/api/v1",
      "apiKey": "..."
    }
  ]
}
```

To utilize features such as provider preferences or model routing configuration, include these parameters inside the `models[].requestOptions.extraBodyProperties` field of your plugin config.

For example, to prevent extra-long prompts from being compressed, you can explicitly turn off the feature like so:

```json title="~/.continue/config.json"
{
  "models": [
    {
      ...
      "requestOptions": {
        "extraBodyProperties": {
          "transforms": []
        }
      }
    }
  ]
}
```

Learn more about available settings [here](https://openrouter.ai/docs).
@@ -38,6 +38,9 @@ You can deploy a model in your [AWS](https://github.com/continuedev/deploy-os-co

## SaaS

You can access both open-source and commercial LLMs via:

* [OpenRouter](../reference/Model%20Providers/openrouter.md)

### Open-source models

You can deploy open-source LLMs on a service using:
@@ -67,10 +67,22 @@ Continue can be used in [code-server](https://coder.com/), but if you are runnin

## Download the latest pre-release

### VS Code

We are constantly making fixes and improvements to Continue, but the latest changes remain in a "pre-release" version for roughly a week so that we can test their stability. If you are experiencing issues, you can try the pre-release by going to the Continue extension page in VS Code and selecting "Switch to Pre-Release" as shown below.

![Pre-Release](/img/prerelease.png)

### JetBrains

On JetBrains, the "pre-release" happens through their Early Access Program (EAP) channel. To download the latest EAP version, enable the EAP channel:

1. Open JetBrains settings (cmd/ctrl+,) and go to "Plugins"
2. Click the gear icon at the top
3. Select "Manage Plugin Repositories..."
4. Add "https://plugins.jetbrains.com/plugins/eap/list" to the list
5. You'll now always be able to download the latest EAP version from the marketplace

## Download an Older Version

If you've tried everything, reported an error, know that a previous version was working for you, and are waiting to hear back, you can try downloading an older version of the extension.
@@ -0,0 +1,73 @@
# Prompt files (experimental)

Prompt (`.prompt`) files are an easy way to build and share LLM prompts with others. The format is inspired by [HumanLoops's .prompt file](https://docs.humanloop.com/docs/prompt-file-format), and adds templating so that you can easily refer to files, your current git diff, and eventually much more.

## How to create a prompt file

Below is a quick example of setting up a prompt file to write unit tests:

1. Create a folder called `.prompts` at the top level of your repository
2. Add a file called `test.prompt` to this folder
3. Write the following contents to `test.prompt` and save

```
temperature: 0.5
maxTokens: 4096
---
<system>
You are an expert programmer
</system>

{{{ input }}}

Write unit tests for the above selected code, following each of these instructions:
- Use `jest`
- Properly set up and tear down
- Include important edge cases
- The tests should be complete and sophisticated
- Give the tests just as chat output, don't edit any file
- Don't explain how to set up `jest`
```

Now to use this prompt, you can highlight code and use cmd/ctrl+L to select it in the Continue sidebar. Then, type "/" to see the list of slash commands and choose the one called "test". Press enter and the LLM will respond given the instructions from your prompt file.

## Syntax

> The current state of this format is experimental and subject to change

### Preamble

The "preamble" is everything above the `---` separator, and lets you specify model parameters. It uses YAML syntax and currently supports the following parameters:

- `temperature`
- `topP`
- `topK`
- `minP`
- `presencePenalty`
- `frequencyPenalty`
- `mirostat`
- `stop`
- `maxTokens`
- `name`
- `description`

If you don't need any of these parameters, you can leave out the preamble and do not need to include the `---` separator.

### Body

The "body" is everything below the `---` separator, and contains your prompt.

At its most basic, the body can be just text.

To add a system message, start the body with `<system></system>` tags like in the example above and place your system message inside.

The body also supports templating with [Handlebars syntax](https://handlebarsjs.com/guide/). The following variables are currently available:

- `input`: The full text from the input box in the sidebar that is sent along with the slash command
- `diff`: The current git diff in your workspace

We plan to add support soon for templating with all [context providers](../customization/context-providers.md).

## Feedback

If you have ideas about how to improve the `.prompt` file format, please reach out on [Discord](https://discord.gg/NWtdYexhMs).
@@ -28,6 +28,30 @@ ollama run starcoder2:3b

Once it has been downloaded, you should begin to see completions in VS Code.

## Setting up with LM Studio

You can also set up tab-autocomplete with a local LM Studio instance by following these steps:

1. Download the latest version of LM Studio from [here](https://lmstudio.ai/)
2. Download a model (e.g. search for `second-state/StarCoder2-3B-GGUF` and choose one of the options there)
3. Go to the server section (button is on the left), select your model from the dropdown at the top, and click "Start Server"
4. Go to the "My Models" section (button is on the left), find your selected model, and copy the path (example: `second-state/StarCoder2-3B-GGUF/starcoder2-3b-Q8_0.gguf`); this will be used as the "model" attribute in Continue
5. Go to Continue and modify the configurations for a [custom model](#setting-up-a-custom-model)
6. Set the "provider" to `lmstudio` and the "model" to the path copied earlier

Example:

```json title=~/.continue/config.json
{
  "tabAutocompleteModel": {
    "title": "Starcoder2 3b",
    "model": "second-state/StarCoder2-3B-GGUF/starcoder2-3b-Q8_0.gguf",
    "provider": "lmstudio"
  },
  ...
}
```

## Setting up a custom model

All of the configuration options available for chat models are available to use for tab-autocomplete. For example, if you wanted to use a remote Ollama instance you would edit your `config.json` like this (note that it is not inside the models array):
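The hunk ends before the example itself. As a minimal sketch (not part of the diff), assuming a remote Ollama host at a placeholder address on Ollama's default port, the block might look like:

```json title=~/.continue/config.json
{
  "tabAutocompleteModel": {
    "title": "Tab Autocomplete Model",
    "provider": "ollama",
    "model": "starcoder2:3b",
    "apiBase": "http://<my-ollama-host>:11434"
  }
}
```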
@@ -67,7 +67,7 @@ const config = {
       },
     ],
     // Replace with your project's social card
-    image: "img/continue-social-card.png",
+    image: "https://docs.continue.dev/img/continue-social-card.png",
     navbar: {
       title: "Continue",
       logo: {
@@ -50,6 +50,7 @@ const sidebars = {
         "walkthroughs/running-continue-without-internet",
         "walkthroughs/codebase-embeddings",
         "walkthroughs/tab-autocomplete",
+        "walkthroughs/prompt-files",
         // "walkthroughs/config-file-migration",
       ],
     },
@@ -438,14 +438,12 @@
       "properties": {
         "model": {
           "enum": [
-            "gpt-4-turbo",
             "gpt-4o",
             "llama3-70b",
             "gpt-3.5-turbo",
-            "phind-codellama-34b",
             "gemini-pro",
             "mistral-8x7b",
             "claude-3-opus-20240229",
             "claude-3-sonnet-20240229",
             "claude-3-haiku-20240307",
             "starcoder-7b",
@ -438,14 +438,12 @@
|
|||
"properties": {
|
||||
"model": {
|
||||
"enum": [
|
||||
"gpt-4-turbo",
|
||||
"gpt-4o",
|
||||
"llama3-70b",
|
||||
"gpt-3.5-turbo",
|
||||
"phind-codellama-34b",
|
||||
"gemini-pro",
|
||||
"mistral-8x7b",
|
||||
"claude-3-opus-20240229",
|
||||
"claude-3-sonnet-20240229",
|
||||
"claude-3-haiku-20240307",
|
||||
"starcoder-7b",
|
||||
|
|
|
@ -1,9 +1,3 @@
{
"customCommands": [
{
"name": "hello",
"prompt": "{{{ input }}}\n\nWrite a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.",
"description": "This is an example custom command. Use /config to edit it and create more"
}
]
"customCommands": []
}
@ -0,0 +1,28 @@
temperature: 0.0
---
{{{ diff }}}

Give me feedback on the above changes. For each file, you should output a markdown section including the following:
- If you found any problems, an h3 like "❌ <filename>"
- If you didn't find any problems, an h3 like "✅ <filename>"
- If you found any problems, add below a bullet point description of what you found, including a minimal code snippet explaining how to fix it
- If you didn't find any problems, you don't need to add anything else

Here is an example. The example is surrounded in backticks, but your response should not be:

```
### ✅ <Filename1>

### ❌ <Filename2>

<Description>
```

You should look primarily for the following types of issues, and only mention other problems if they are highly pressing.

- console.logs that have been left after debugging
- repeated code
- algorithmic errors that could fail under edge cases
- something that could be refactored

Make sure to review ALL files that were changed, do not skip any.
@ -0,0 +1,17 @@
temperature: 0.5
maxTokens: 4096
---
<system>
You are an expert programmer
</system>

{{{ input }}}

Write unit tests for the above selected code, following each of these instructions:
- Use `jest`
- Properly set up and tear down
- Include important edge cases
- The tests should be complete and sophisticated
- Give the tests just as chat output, don't edit any file
- Don't explain how to set up `jest`
- Write a single code block, making sure to label with the language being used (e.g. "```typescript")
@ -438,14 +438,12 @@
"properties": {
"model": {
"enum": [
"gpt-4-turbo",
"gpt-4o",
"llama3-70b",
"gpt-3.5-turbo",
"phind-codellama-34b",
"gemini-pro",
"mistral-8x7b",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
"starcoder-7b",

@ -612,7 +610,8 @@
"enum": [
"chat-bison-001",
"gemini-pro",
"gemini-1.5-pro-latest"
"gemini-1.5-pro-latest",
"gemini-1.5-flash-latest"
]
}
}

@ -987,6 +986,7 @@
"chat-bison-001",
"gemini-pro",
"gemini-1.5-pro-latest",
"gemini-1.5-flash-latest",
"mistral-tiny",
"mistral-small",
"mistral-medium",

@ -267,7 +267,10 @@
"apiType": {
"title": "Api Type",
"markdownDescription": "OpenAI API type, either `openai` or `azure`",
"enum": ["openai", "azure"]
"enum": [
"openai",
"azure"
]
},
"apiVersion": {
"title": "Api Version",

@ -280,7 +283,11 @@
"type": "string"
}
},
"required": ["title", "provider", "model"],
"required": [
"title",
"provider",
"model"
],
"allOf": [
{
"if": {
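For orientation, a minimal sketch of a chat model entry that satisfies the `required` list above; the title, model choice, and placeholder API key are illustrative:

```json
{
  "models": [
    {
      "title": "GPT-4o",
      "provider": "openai",
      "model": "gpt-4o",
      "apiKey": "<API_KEY>"
    }
  ]
}
```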
@ -290,7 +297,9 @@
}
},
"not": {
"required": ["provider"]
"required": [
"provider"
]
}
},
"then": {

@ -316,33 +325,48 @@
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"required": ["apiKey"]
"required": [
"apiKey"
]
}
},
{
"if": {
"properties": {
"provider": {
"enum": ["huggingface-tgi", "huggingface-inference-api"]
"enum": [
"huggingface-tgi",
"huggingface-inference-api"
]
}
}
},
"then": {
"required": ["apiBase"]
"required": [
"apiBase"
]
},
"required": ["provider"]
"required": [
"provider"
]
},
{
"if": {
"properties": {
"provider": {
"enum": ["openai"]
"enum": [
"openai"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -362,10 +386,14 @@
"if": {
"properties": {
"provider": {
"enum": ["openai"]
"enum": [
"openai"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -396,10 +424,14 @@
"if": {
"properties": {
"provider": {
"enum": ["replicate"]
"enum": [
"replicate"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -429,23 +461,25 @@
"if": {
"properties": {
"provider": {
"enum": ["free-trial"]
"enum": [
"free-trial"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {
"model": {
"enum": [
"gpt-4-turbo",
"gpt-4o",
"llama3-70b",
"gpt-3.5-turbo",
"phind-codellama-34b",
"gemini-pro",
"mistral-8x7b",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
"starcoder-7b",
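As a concrete instance, a sketch of a `free-trial` entry using one of the models enumerated above (the title is illustrative):

```json
{
  "title": "Claude 3 Sonnet (trial)",
  "provider": "free-trial",
  "model": "claude-3-sonnet-20240229"
}
```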
@ -459,7 +493,9 @@
"if": {
"properties": {
"provider": {
"enum": ["openai"]
"enum": [
"openai"
]
},
"apiType": {
"not": {

@ -467,7 +503,9 @@
}
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -525,10 +563,14 @@
"if": {
"properties": {
"provider": {
"enum": ["anthropic"]
"enum": [
"anthropic"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -556,15 +598,22 @@
"if": {
"properties": {
"provider": {
"enum": ["cohere"]
"enum": [
"cohere"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {
"model": {
"enum": ["command-r", "command-r-plus"]
"enum": [
"command-r",
"command-r-plus"
]
}
}
}

@ -573,10 +622,14 @@
"if": {
"properties": {
"provider": {
"enum": ["bedrock"]
"enum": [
"bedrock"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -601,10 +654,14 @@
"if": {
"properties": {
"provider": {
"enum": ["gemini"]
"enum": [
"gemini"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -622,10 +679,14 @@
"if": {
"properties": {
"provider": {
"enum": ["together"]
"enum": [
"together"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -659,10 +720,14 @@
"if": {
"properties": {
"provider": {
"enum": ["deepinfra"]
"enum": [
"deepinfra"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -685,7 +750,9 @@
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -731,10 +798,14 @@
"if": {
"properties": {
"provider": {
"enum": ["ollama"]
"enum": [
"ollama"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -782,10 +853,14 @@
"if": {
"properties": {
"provider": {
"enum": ["mistral"]
"enum": [
"mistral"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -806,10 +881,14 @@
"if": {
"properties": {
"provider": {
"enum": ["groq"]
"enum": [
"groq"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -833,20 +912,30 @@
"const": "azure"
}
},
"required": ["apiType"]
"required": [
"apiType"
]
},
"then": {
"required": ["engine", "apiVersion", "apiBase"]
"required": [
"engine",
"apiVersion",
"apiBase"
]
}
},
{
"if": {
"properties": {
"provider": {
"enum": ["openai"]
"enum": [
"openai"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {
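For reference, a sketch of a config entry that would satisfy the `azure` branch above; the deployment name, API version, and endpoint values are illustrative placeholders:

```json
{
  "title": "Azure GPT-4",
  "provider": "openai",
  "model": "gpt-4",
  "apiType": "azure",
  "engine": "my-gpt4-deployment",
  "apiVersion": "2023-07-01-preview",
  "apiBase": "https://my-resource.openai.azure.com"
}
```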
@ -863,7 +952,9 @@
"const": "llamafile"
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -877,10 +968,14 @@
"if": {
"properties": {
"provider": {
"enum": ["text-gen-webui"]
"enum": [
"text-gen-webui"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -894,10 +989,14 @@
"if": {
"properties": {
"provider": {
"enum": ["flowise"]
"enum": [
"flowise"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"properties": {

@ -924,7 +1023,10 @@
"type": "string"
}
},
"required": ["key", "value"]
"required": [
"key",
"value"
]
}
},
"additionalFlowiseConfiguration": {

@ -943,7 +1045,10 @@
"description": "Configuration Property value"
}
},
"required": ["key", "value"]
"required": [
"key",
"value"
]
}
},
"model": {

@ -1032,7 +1137,9 @@
"type": "string"
}
},
"required": ["default"]
"required": [
"default"
]
},
"SlashCommand": {
"title": "SlashCommand",

@ -1091,7 +1198,9 @@
"if": {
"properties": {
"name": {
"enum": ["issue"]
"enum": [
"issue"
]
}
}
},

@ -1104,17 +1213,23 @@
"description": "Enter the URL of the GitHub repository for which you want to generate the issue."
}
},
"required": ["repositoryUrl"]
"required": [
"repositoryUrl"
]
}
},
"required": ["params"]
"required": [
"params"
]
}
},
{
"if": {
"properties": {
"name": {
"enum": ["edit"]
"enum": [
"edit"
]
}
}
},

@ -1139,7 +1254,9 @@
"if": {
"properties": {
"name": {
"enum": ["share"]
"enum": [
"share"
]
}
}
},

@ -1157,7 +1274,10 @@
}
}
],
"required": ["name", "description"]
"required": [
"name",
"description"
]
},
"CustomCommand": {
"title": "CustomCommand",

@ -1176,7 +1296,11 @@
"type": "string"
}
},
"required": ["name", "prompt", "description"]
"required": [
"name",
"prompt",
"description"
]
},
"ContextProviderWithParams": {
"title": "ContextProviderWithParams",

@ -1249,7 +1373,9 @@
"if": {
"properties": {
"name": {
"enum": ["google"]
"enum": [
"google"
]
}
}
},

@ -1262,17 +1388,23 @@
"description": "Your API key for https://serper.dev in order to get Google search results"
}
},
"required": ["serperApiKey"]
"required": [
"serperApiKey"
]
}
},
"required": ["params"]
"required": [
"params"
]
}
},
{
"if": {
"properties": {
"name": {
"enum": ["open"]
"enum": [
"open"
]
}
}
},

@ -1294,7 +1426,9 @@
"if": {
"properties": {
"name": {
"enum": ["issue"]
"enum": [
"issue"
]
}
}
},

@ -1323,24 +1457,37 @@
"type": {
"type": "string",
"description": "The type of issues to search for",
"enum": ["open", "closed", "all"]
"enum": [
"open",
"closed",
"all"
]
}
},
"required": ["owner", "repo"]
"required": [
"owner",
"repo"
]
}
}
},
"required": ["repos"]
"required": [
"repos"
]
}
},
"required": ["params"]
"required": [
"params"
]
}
},
{
"if": {
"properties": {
"name": {
"enum": ["database"]
"enum": [
"database"
]
}
}
},
@ -1358,7 +1505,11 @@
"connection_type": {
"type": "string",
"description": "The type of database (e.g., 'postgres', 'mysql')",
"enum": ["postgres", "mysql", "sqlite"]
"enum": [
"postgres",
"mysql",
"sqlite"
]
},
"connection": {
"type": "object",

@ -1391,17 +1542,25 @@
"required": []
}
},
"required": ["name", "type", "connection"]
"required": [
"name",
"type",
"connection"
]
}
},
"required": ["connections"]
"required": [
"connections"
]
}
},
{
"if": {
"properties": {
"name": {
"enum": ["gitlab-mr"]
"enum": [
"gitlab-mr"
]
}
}
},

@ -1422,17 +1581,23 @@
"description": "If you have code selected, filters out comments that aren't related to the selection."
}
},
"required": ["token"]
"required": [
"token"
]
}
},
"required": ["params"]
"required": [
"params"
]
}
},
{
"if": {
"properties": {
"name": {
"enum": ["jira"]
"enum": [
"jira"
]
}
}
},

@ -1478,17 +1643,24 @@
]
}
},
"required": ["domain", "token"]
"required": [
"domain",
"token"
]
}
},
"required": ["params"]
"required": [
"params"
]
}
},
{
"if": {
"properties": {
"name": {
"enum": ["http"]
"enum": [
"http"
]
}
}
},

@ -1501,17 +1673,24 @@
"description": "The HTTP endpoint of your context provider server."
}
},
"required": ["url"]
"required": [
"url"
]
}
},
"required": ["params"]
"required": [
"params"
]
}
},
{
"if": {
"properties": {
"name": {
"enum": ["codebase", "folder"]
"enum": [
"codebase",
"folder"
]
}
}
},

@ -1546,7 +1725,9 @@
"if": {
"properties": {
"name": {
"enum": ["postgres"]
"enum": [
"postgres"
]
}
}
},

@ -1598,11 +1779,19 @@
}
}
},
"required": ["host", "port", "user", "password", "database"]
"required": [
"host",
"port",
"user",
"password",
"database"
]
}
}
],
"required": ["name"]
"required": [
"name"
]
},
"SerializedContinueConfig": {
"title": "config.json",

@ -1753,32 +1942,46 @@
"$ref": "#/definitions/RequestOptions"
}
},
"required": ["provider"],
"required": [
"provider"
],
"allOf": [
{
"if": {
"properties": {
"provider": {
"enum": ["ollama"]
"enum": [
"ollama"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"required": ["model"]
"required": [
"model"
]
}
},
{
"if": {
"properties": {
"provider": {
"enum": ["cohere"]
"enum": [
"cohere"
]
}
},
"required": ["provider"]
"required": [
"provider"
]
},
"then": {
"required": ["apiKey"]
"required": [
"apiKey"
]
}
}
]

@ -1789,22 +1992,33 @@
"type": "object",
"properties": {
"name": {
"enum": ["cohere", "voyage", "llm", "free-trial"]
"enum": [
"cohere",
"voyage",
"llm",
"free-trial"
]
},
"params": {
"type": "object"
}
},
"required": ["name"],
"required": [
"name"
],
"allOf": [
{
"if": {
"properties": {
"name": {
"enum": ["cohere"]
"enum": [
"cohere"
]
}
},
"required": ["name"]
"required": [
"name"
]
},
"then": {
"properties": {

@ -1826,7 +2040,9 @@
"type": "string"
}
},
"required": ["apiKey"]
"required": [
"apiKey"
]
}
}
}

@ -1835,10 +2051,14 @@
"if": {
"properties": {
"name": {
"enum": ["llm"]
"enum": [
"llm"
]
}
},
"required": ["name"]
"required": [
"name"
]
},
"then": {
"properties": {

@ -1849,7 +2069,9 @@
"type": "string"
}
},
"required": ["modelTitle"]
"required": [
"modelTitle"
]
}
}
}

@ -1858,10 +2080,14 @@
"if": {
"properties": {
"name": {
"enum": ["voyage"]
"enum": [
"voyage"
]
}
},
"required": ["name"]
"required": [
"name"
]
},
"then": {
"properties": {

@ -1872,10 +2098,14 @@
"type": "string"
},
"model": {
"enum": ["rerank-lite-1"]
"enum": [
"rerank-lite-1"
]
}
},
"required": ["apiKey"]
"required": [
"apiKey"
]
}
}
}
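Reading these branches together, a sketch of a `voyage` entry consistent with the schema, assuming it sits under the `reranker` key of `config.json` (the API key is a placeholder):

```json
{
  "reranker": {
    "name": "voyage",
    "params": {
      "apiKey": "<VOYAGE_API_KEY>",
      "model": "rerank-lite-1"
    }
  }
}
```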
@ -1931,7 +2161,11 @@
"description": "An optional template string to be used for autocomplete. It will be rendered with the Mustache templating language, and is passed the 'prefix' and 'suffix' variables."
},
"multilineCompletions": {
"enum": ["always", "never", "auto"],
"enum": [
"always",
"never",
"auto"
],
"description": "If set to true, Continue will only complete a single line at a time."
},
"useCache": {
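These properties live on the autocomplete options object; a sketch of how they might be set, assuming the standard `tabAutocompleteOptions` key and a boolean `useCache`:

```json
{
  "tabAutocompleteOptions": {
    "multilineCompletions": "auto",
    "useCache": true
  }
}
```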
@ -1960,7 +2194,10 @@
"type": "object",
"properties": {
"codeBlockToolbarPosition": {
"enum": ["top", "bottom"],
"enum": [
"top",
"bottom"
],
"default": "top",
"description": "Whether to show the copy and apply code buttons at the top or bottom of code blocks in the sidebar."
},

@ -1977,7 +2214,9 @@
"defaultContext": {
"type": "array",
"items": {
"enum": ["activeFile"]
"enum": [
"activeFile"
]
}
},
"modelRoles": {
@ -2017,7 +2256,10 @@
},
"mergeBehavior": {
"type": "string",
"enum": ["merge", "overwrite"],
"enum": [
"merge",
"overwrite"
],
"default": "merge",
"title": "Merge behavior",
"markdownDescription": "If set to 'merge', .continuerc.json will be applied on top of config.json (arrays and objects are merged). If set to 'overwrite', then every top-level property of .continuerc.json will overwrite that property from config.json."
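For example, a sketch of a workspace `.continuerc.json` that opts into overwriting; the second key is just an illustrative top-level property:

```json
{
  "mergeBehavior": "overwrite",
  "allowAnonymousTelemetry": false
}
```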
@ -2025,4 +2267,4 @@
}
}
}
}
}
@ -1,12 +0,0 @@
import { factorial, repeat } from "./factorial";

function fib(n) {
if (n <= 1) return n;
return fib(n - 2) + fib(n - 1);
}

let d = repeat(5, "a");
console.log(d);

let e = factorial(3);
console.log(e);
@ -1,8 +0,0 @@
export function factorial(n) {
if (n <= 1) return 1;
return n * factorial(n - 1);
}

export function repeat(n: number, a: string) {
return a.repeat(n);
}
@ -1,7 +1,7 @@
{
"name": "continue",
"icon": "media/icon.png",
"version": "0.9.94",
"version": "0.9.140",
"repository": {
"type": "git",
"url": "https://github.com/continuedev/continue"
@ -16,6 +16,9 @@ function execCmdSync(cmd) {
// Clear folders that will be packaged to ensure clean slate
rimrafSync(path.join(__dirname, "..", "bin"));
rimrafSync(path.join(__dirname, "..", "out"));
fs.mkdirSync(path.join(__dirname, "..", "out", "node_modules"), {
recursive: true,
});

// Get the target to package for
let target = undefined;
@ -102,16 +105,17 @@ const exe = os === "win32" ? ".exe" : "";
}

// Install node_modules //
// execCmdSync("pnpm install");
// console.log("[info] pnpm install in extensions/vscode completed");
execCmdSync("npm install");
console.log("[info] npm install in extensions/vscode completed");
console.log("Contents of node_modules: ", fs.readdirSync("./node_modules"));

process.chdir("../../gui");

execCmdSync("pnpm install");
console.log("[info] pnpm install in gui completed");
execCmdSync("npm install");
console.log("[info] npm install in gui completed");

if (ghAction()) {
execCmdSync("pnpm run build");
execCmdSync("npm run build");
}

// Copy over the dist folder to the Intellij extension //
@ -482,9 +486,9 @@ function validateFilesPresent() {
`bin/napi-v3/${os}/${arch}/onnxruntime_binding.node`,
`bin/napi-v3/${os}/${arch}/${
os === "darwin"
? "libonnxruntime.1.17.3.dylib"
? "libonnxruntime.1.14.0.dylib"
: os === "linux"
? "libonnxruntime.so.1.17.3"
? "libonnxruntime.so.1.14.0"
: "onnxruntime.dll"
}`,
"builtin-themes/dark_modern.json",
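To make the ternary above concrete, a small sketch (not part of the build script) of the artifact paths it expects after the version pin; the platform/arch combinations are examples:

```js
// Illustrative only: expected onnxruntime artifacts per platform after the pin to 1.14.0
const examples = [
  "bin/napi-v3/darwin/arm64/libonnxruntime.1.14.0.dylib", // macOS
  "bin/napi-v3/linux/x64/libonnxruntime.so.1.14.0", // Linux
  "bin/napi-v3/win32/x64/onnxruntime.dll", // Windows (no version in the filename)
];
console.log(examples.join("\n"));
```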
@ -214,7 +214,10 @@ export class VsCodeExtension {
// Listen for file changes in the workspace
const filepath = event.uri.fsPath;

if (filepath.endsWith(".continuerc.json")) {
if (
filepath.endsWith(".continuerc.json") ||
filepath.endsWith(".prompt")
) {
this.configHandler.reloadConfig();
this.tabAutocompleteModel.clearLlm();
} else if (
@ -1,6 +1,7 @@
import { ContextItemId, IDE } from "core";
import { ConfigHandler } from "core/config/handler";
import {
setupLocalAfterFreeTrial,
setupLocalMode,
setupOptimizedExistingUserMode,
setupOptimizedMode,
@ -126,19 +127,45 @@ export async function showTutorial() {
}
}

vscode.window
.showErrorMessage(message, "Show Logs", "Troubleshooting")
.then((selection) => {
if (selection === "Show Logs") {
vscode.commands.executeCommand(
"workbench.action.toggleDevTools",
);
} else if (selection === "Troubleshooting") {
vscode.env.openExternal(
vscode.Uri.parse("https://docs.continue.dev/troubleshooting"),
);
}
});
if (message.includes("https://proxy-server")) {
message = message.split("\n").slice(1).join("\n").trim();
try {
message = JSON.parse(message).message;
} catch {}
if (message.includes("exceeded")) {
message +=
" To keep using Continue, you can set up a local model or use your own API key.";
} else {
message +=
" To avoid rate limiting, you can set up a local model or use your own API key.";
}

vscode.window
.showInformationMessage(message, "Add API Key", "Use Local Model")
.then((selection) => {
if (selection === "Add API Key") {
this.request("addApiKey", undefined);
} else if (selection === "Use Local Model") {
this.request("setupLocalModel", undefined);
}
});
} else {
vscode.window
.showErrorMessage(message, "Show Logs", "Troubleshooting")
.then((selection) => {
if (selection === "Show Logs") {
vscode.commands.executeCommand(
"workbench.action.toggleDevTools",
);
} else if (selection === "Troubleshooting") {
vscode.env.openExternal(
vscode.Uri.parse(
"https://docs.continue.dev/troubleshooting",
),
);
}
});
}
}
}
});
@ -628,9 +655,11 @@ export async function showTutorial() {
editConfigJson(
mode === "local"
? setupLocalMode
: mode === "optimized"
? setupOptimizedMode
: setupOptimizedExistingUserMode,
: mode === "localAfterFreeTrial"
? setupLocalAfterFreeTrial
: mode === "optimized"
? setupOptimizedMode
: setupOptimizedExistingUserMode,
);
this.configHandler.reloadConfig();
});
gui/pnpm-lock.yaml: diff suppressed because it is too large (8119 lines)
@ -26,6 +26,7 @@ import { isJetBrains, postToIde } from "../util/ide";
import { getLocalStorage } from "../util/localStorage";
import HeaderButtonWithText from "./HeaderButtonWithText";
import TextDialog from "./dialogs";
import { ftl } from "./dialogs/FTCDialog";
import IndexingProgressBar from "./loaders/IndexingProgressBar";
import ProgressBar from "./loaders/ProgressBar";
import ModelSelect from "./modelSelection/ModelSelect";
@ -167,6 +168,25 @@ const Layout = () => {
setIndexingState(data);
});

useWebviewListener(
"addApiKey",
async () => {
navigate("/modelconfig/openai");
},
[navigate],
);

useWebviewListener(
"setupLocalModel",
async () => {
postToIde("completeOnboarding", {
mode: "localAfterFreeTrial",
});
navigate("/localOnboarding");
},
[navigate],
);

useEffect(() => {
if (isJetBrains()) {
return;
@ -248,12 +268,10 @@ const Layout = () => {
)} */}
<ModelSelect />
{indexingState.status !== "indexing" && // Would take up too much space together with indexing progress
defaultModel?.provider === "free-trial" &&
(location.pathname === "/settings" ||
parseInt(localStorage.getItem("ftc") || "0") >= 50) && (
defaultModel?.provider === "free-trial" && (
<ProgressBar
completed={parseInt(localStorage.getItem("ftc") || "0")}
total={100}
total={ftl()}
/>
)}
@ -14,6 +14,14 @@ const GridDiv = styled.div`
align-items: center;
`;

export const ftl = () => {
const ftc = parseInt(localStorage.getItem("ftc"));
if (ftc && ftc > 52) {
return 100;
}
return 50;
};

function FTCDialog() {
const navigate = useNavigate();
const [apiKey, setApiKey] = React.useState("");
@ -23,18 +31,16 @@ function FTCDialog() {
<div className="p-4">
<h3>Free Trial Limit Reached</h3>
<p>
You've reached the free trial limit of 250 free inputs with Continue's
OpenAI API key. To keep using Continue, you can either use your own API
key, or use a local LLM. To read more about the options, see our{" "}
You've reached the free trial limit of {ftl()} free inputs. To keep
using Continue, you can either use your own API key, or use a local LLM.
To read more about the options, see our{" "}
<a
href="https://docs.continue.dev/customization/models"
target="_blank"
>
documentation
</a>
. If you're just looking for fastest way to keep going, type '/config'
to open your Continue config file and paste your API key into the
OpenAIFreeTrial object.
.
</p>

<Input
@ -0,0 +1,85 @@
import React from "react";
import { useDispatch } from "react-redux";
import { useNavigate } from "react-router-dom";
import styled from "styled-components";
import { Button, Input } from "..";
import { setDefaultModel } from "../../redux/slices/stateSlice";
import { setShowDialog } from "../../redux/slices/uiStateSlice";
import { ideRequest, postToIde } from "../../util/ide";

const GridDiv = styled.div`
display: grid;
grid-template-columns: 1fr 1fr;
grid-gap: 8px;
align-items: center;
`;

function SetupLocalOrKeyDialog() {
const navigate = useNavigate();
const [apiKey, setApiKey] = React.useState("");
const dispatch = useDispatch();

return (
<div className="p-4">
<h3>Set up your own model</h3>
<p>
To keep using Continue after your free inputs, you can either use your
own API key, or use a local LLM. To read more about the options, see our{" "}
<a
className="cursor-pointer"
onClick={() =>
ideRequest(
"openUrl",
"https://docs.continue.dev/reference/Model%20Providers/freetrial",
)
}
>
documentation
</a>
.
</p>

<Input
type="text"
placeholder="Enter your OpenAI API key"
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
/>
<Button
className="w-full"
disabled={!apiKey}
onClick={() => {
postToIde("config/addOpenAiKey", apiKey);
dispatch(setShowDialog(false));
dispatch(setDefaultModel({ title: "GPT-4" }));
}}
>
Use my OpenAI API key
</Button>
<div className="text-center">— OR —</div>
<GridDiv>
<Button
onClick={() => {
dispatch(setShowDialog(false));
postToIde("completeOnboarding", {
mode: "localAfterFreeTrial",
});
navigate("/localOnboarding");
}}
>
Use local model
</Button>
<Button
onClick={() => {
dispatch(setShowDialog(false));
navigate("/models");
}}
>
View all options
</Button>
</GridDiv>
</div>
);
}

export default SetupLocalOrKeyDialog;
@ -1,6 +1,13 @@
import ReactDOM from "react-dom";
import { useDispatch } from "react-redux";
import styled from "styled-components";
import { StyledTooltip, lightGray, vscForeground } from "..";
import {
setDialogMessage,
setShowDialog,
} from "../../redux/slices/uiStateSlice";
import { getFontSize } from "../../util";
import SetupLocalOrKeyDialog from "../dialogs/SetupLocalOrKey";

const ProgressBarWrapper = styled.div`
width: 100px;

@ -23,6 +30,7 @@ const GridDiv = styled.div`
grid-template-rows: 1fr auto;
align-items: center;
justify-items: center;
cursor: pointer;
`;

const P = styled.p`
@ -42,37 +50,46 @@ interface ProgressBarProps {
}

const ProgressBar = ({ completed, total }: ProgressBarProps) => {
const dispatch = useDispatch();
const fillPercentage = Math.min(100, Math.max(0, (completed / total) * 100));

const tooltipPortalDiv = document.getElementById("tooltip-portal-div");

return (
<>
<a
href="https://docs.continue.dev/reference/Model%20Providers/freetrial"
className="no-underline ml-2"
<GridDiv
data-tooltip-id="usage_progress_bar"
onClick={() => {
dispatch(setShowDialog(true));
dispatch(setDialogMessage(<SetupLocalOrKeyDialog />));
}}
>
<GridDiv data-tooltip-id="usage_progress_bar">
<ProgressBarWrapper>
<ProgressBarFill
completed={fillPercentage}
color={
completed / total > 0.75
? completed / total > 0.95
? "#f00"
: "#fc0"
: undefined
}
/>
</ProgressBarWrapper>
<P>
Free Uses: {completed} / {total}
</P>
</GridDiv>
</a>
<StyledTooltip id="usage_progress_bar" place="bottom">
{
"Continue allows you to use our OpenAI API key for up to 250 inputs. After this, you can either use your own API key, or use a local LLM. Click the progress bar to go to the docs and learn more."
}
</StyledTooltip>
<ProgressBarWrapper>
<ProgressBarFill
completed={fillPercentage}
color={
completed / total > 0.75
? completed / total > 0.9
? "#f00"
: "#fc0"
: undefined
}
/>
</ProgressBarWrapper>
<P>
Free Uses: {completed} / {total}
</P>
</GridDiv>

{tooltipPortalDiv &&
ReactDOM.createPortal(
<StyledTooltip id="usage_progress_bar" place="top">
{
"Click to use your own API key or local LLM (required after 100 inputs)"
}
</StyledTooltip>,
tooltipPortalDiv,
)}
</>
);
};
@ -4,10 +4,10 @@ import {
AtSymbolIcon,
BeakerIcon,
BookOpenIcon,
ChevronDoubleRightIcon,
CodeBracketIcon,
Cog6ToothIcon,
CommandLineIcon,
CubeIcon,
ExclamationCircleIcon,
ExclamationTriangleIcon,
FolderIcon,

@ -82,8 +82,8 @@ function DropdownIcon(props: { className?: string; item: ComboBoxItem }) {
props.item.type === "contextProvider"
? props.item.id
: props.item.type === "slashCommand"
? props.item.id
: props.item.type;
? props.item.id
: props.item.type;

const Icon = ICONS_FOR_DROPDOWN[provider];
const iconClass = `${props.className} flex-shrink-0`;

@ -91,11 +91,7 @@ function DropdownIcon(props: { className?: string; item: ComboBoxItem }) {
return props.item.type === "contextProvider" ? (
<AtSymbolIcon className={iconClass} height="1.2em" width="1.2em" />
) : (
<ChevronDoubleRightIcon
className={iconClass}
height="1.2em"
width="1.2em"
/>
<CubeIcon className={iconClass} height="1.2em" width="1.2em" />
);
}
return <Icon className={iconClass} height="1.2em" width="1.2em" />;
@ -2,5 +2,5 @@ import { InputModifiers } from "core";

export const defaultInputModifiers: InputModifiers = {
useCodebase: false,
noContext: false,
noContext: true,
};
@ -26,7 +26,7 @@ import {
vscBackground,
vscForeground,
} from "../components";
import FTCDialog from "../components/dialogs/FTCDialog";
import FTCDialog, { ftl } from "../components/dialogs/FTCDialog";
import StepContainer from "../components/gui/StepContainer";
import TimelineItem from "../components/gui/TimelineItem";
import ContinueInputBox from "../components/mainInput/ContinueInputBox";

@ -244,7 +244,7 @@ function GUI(props: GUIProps) {
const u = parseInt(ftc);
localStorage.setItem("ftc", (u + 1).toString());

if (u >= 100) {
if (u >= ftl()) {
dispatch(setShowDialog(true));
dispatch(setDialogMessage(<FTCDialog />));
posthog?.capture("ftc_reached");
@ -37,7 +37,12 @@ function Stats() {
useNavigationListener();
const navigate = useNavigate();

const [days, setDays] = useState<{ day: string; tokens: number }[]>([]);
const [days, setDays] = useState<
{ day: string; promptTokens: number; generatedTokens: number }[]
>([]);
const [models, setModels] = useState<
{ model: string; promptTokens: number; generatedTokens: number }[]
>([]);

useEffect(() => {
ideRequest("stats/getTokensPerDay", undefined).then((days) => {

@ -45,8 +50,6 @@ function Stats() {
});
}, []);

const [models, setModels] = useState<{ model: string; tokens: number }[]>([]);

useEffect(() => {
ideRequest("stats/getTokensPerModel", undefined).then((models) => {
setModels(models);

@ -75,8 +78,12 @@ function Stats() {
<h2 className="ml-2">Tokens per Day</h2>
<CopyButton
text={generateTable(
([["Day", "Tokens"]] as any).concat(
days.map((day) => [day.day, day.tokens]),
([["Day", "Generated Tokens", "Prompt Tokens"]] as any).concat(
days.map((day) => [
day.day,
day.generatedTokens,
day.promptTokens,
]),
),
)}
/>

@ -85,14 +92,16 @@ function Stats() {
<thead>
<Tr>
<Th>Day</Th>
<Th>Tokens</Th>
<Th>Generated Tokens</Th>
<Th>Prompt Tokens</Th>
</Tr>
</thead>
<tbody>
{days.map((day) => (
<Tr key={day.day} className="">
<Td>{day.day}</Td>
<Td>{day.tokens}</Td>
<Td>{day.generatedTokens}</Td>
<Td>{day.promptTokens}</Td>
</Tr>
))}
</tbody>

@ -102,8 +111,12 @@ function Stats() {
<h2 className="ml-2">Tokens per Model</h2>
<CopyButton
text={generateTable(
([["Model", "Tokens"]] as any).concat(
models.map((model) => [model.model, model.tokens]),
([["Model", "Generated Tokens", "Prompt Tokens"]] as any).concat(
models.map((model) => [
model.model,
model.generatedTokens,
model.promptTokens,
]),
),
)}
/>

@ -112,14 +125,16 @@ function Stats() {
<thead>
<Tr>
<Th>Model</Th>
<Th>Tokens</Th>
<Th>Generated Tokens</Th>
<Th>Prompt Tokens</Th>
</Tr>
</thead>
<tbody>
{models.map((model) => (
<Tr key={model.model} className="">
<Td>{model.model}</Td>
<Td>{model.tokens}</Td>
<Td>{model.generatedTokens}</Td>
<Td>{model.promptTokens}</Td>
</Tr>
))}
</tbody>
@ -454,6 +454,18 @@ const gemini15Pro: ModelPackage = {
icon: "gemini.png",
providerOptions: ["gemini", "freetrial"],
};
const gemini15Flash: ModelPackage = {
title: "Gemini 1.5 Flash",
description: "Fast and versatile multimodal model for scaling across diverse tasks",
params: {
title: "Gemini 1.5 Flash",
model: "gemini-1.5-flash-latest",
contextLength: 1_000_000,
apiKey: "<API_KEY>",
},
icon: "gemini.png",
providerOptions: ["gemini"],
};

const deepseek: ModelPackage = {
title: "DeepSeek-Coder",

@ -546,7 +558,7 @@ const gpt4turbo: ModelPackage = {
contextLength: 128_000,
title: "GPT-4 Turbo",
},
providerOptions: ["openai", "freetrial"],
providerOptions: ["openai"],
icon: "openai.png",
};

@ -660,6 +672,7 @@ export const MODEL_INFO: (ModelPackage | string)[] = [
"Gemini",
gemini15Pro,
geminiPro,
gemini15Flash,
"Open Source",
llama3Chat,
deepseek,

@ -863,7 +876,7 @@ export const PROVIDER_INFO: { [key: string]: ModelInfo } = {
required: true,
},
],
packages: [gemini15Pro, geminiPro],
packages: [gemini15Pro, geminiPro, gemini15Flash],
},
mistral: {
title: "Mistral API",

@ -1039,7 +1052,6 @@ After it's up and running, you can start using Continue.`,
{ ...claude3Haiku, title: "Claude 3 Haiku (trial)" },
{ ...gemini15Pro, title: "Gemini 1.5 Pro (trial)" },
{ ...gpt4o, title: "GPT-4o (trial)" },
{ ...gpt4turbo, title: "GPT-4-Turbo (trial)" },
{ ...gpt35turbo, title: "GPT-3.5-Turbo (trial)" },
{
...AUTODETECT,
@ -50,15 +50,15 @@ if (($null -eq $cargo) -or ($null -eq $node)) {

Write-Host "`nInstalling Core extension dependencies..." -ForegroundColor White
Push-Location core
pnpm install
pnpm link --global
npm install
npm link
Pop-Location

Write-Output "`nInstalling GUI extension dependencies..." -ForegroundColor White
Push-Location gui
pnpm install
pnpm link --global @continuedev/core
pnpm run build
npm install
npm link @continuedev/core
npm run build
Pop-Location

# VSCode Extension (will also package GUI)

@ -66,10 +66,10 @@ Write-Output "`nInstalling VSCode extension dependencies..." -ForegroundColor Wh
Push-Location extensions/vscode

# This does way too many things inline but is the common denominator between many of the scripts
pnpm install
pnpm link --global @continuedev/core
npm install
npm link @continuedev/core

pnpm run package
npm run package

Pop-Location

@ -77,8 +77,8 @@ Pop-Location
Write-Output "`nInstalling binary dependencies..." -ForegroundColor White
Push-Location binary

pnpm install
pnpm run build
npm install
npm run build

Pop-Location
|
@ -6,28 +6,28 @@
|
|||
set -e
|
||||
echo "Installing Core extension dependencies..."
|
||||
pushd core
|
||||
pnpm install
|
||||
pnpm link --global
|
||||
npm install
|
||||
npm link
|
||||
popd
|
||||
|
||||
echo "Installing GUI extension dependencies..."
|
||||
pushd gui
|
||||
pnpm install
|
||||
pnpm link --global @continuedev/core
|
||||
pnpm run build
|
||||
npm install
|
||||
npm link @continuedev/core
|
||||
npm run build
|
||||
popd
|
||||
# VSCode Extension (will also package GUI)
|
||||
echo "Installing VSCode extension dependencies..."
|
||||
pushd extensions/vscode
|
||||
|
||||
# This does way too many things inline but is the common denominator between many of the scripts
|
||||
pnpm install
|
||||
pnpm link --global @continuedev/core
|
||||
pnpm run package
|
||||
npm install
|
||||
npm link @continuedev/core
|
||||
npm run package
|
||||
|
||||
popd
|
||||
|
||||
echo "Installing binary dependencies..."
|
||||
pushd binary
|
||||
pnpm install
|
||||
pnpm run build
|
||||
npm install
|
||||
npm run build
|
|
@ -1,15 +1,30 @@
const fs = require("fs");
const path = require("path");

const directories = ["./gui", "./core", "./extensions/vscode", "./binary"];
const directories = [
// gui
"./gui/node_modules",
"./gui/out",
"./gui/dist",
// core
"./core/node_modules",
"./core/dist",
// extensions/vscode
"./extensions/vscode/node_modules",
"./extensions/vscode/bin",
"./extensions/vscode/build",
"./extensions/vscode/out",
// binary
"./binary/node_modules",
"./binary/bin",
"./binary/dist",
"./binary/out",
];

directories.forEach((dir) => {
const nodeModulesPath = path.join(dir, "node_modules");

if (fs.existsSync(nodeModulesPath)) {
fs.rmdirSync(nodeModulesPath, { recursive: true });
console.log(`Removed ${nodeModulesPath}`);
if (fs.existsSync(dir)) {
fs.rmdirSync(dir, { recursive: true });
console.log(`Removed ${dir}`);
} else {
console.log(`No node_modules found in ${dir}`);
console.log(`${dir} not found`);
}
});
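A sketch of how this clean script would typically be run from the repository root; the `scripts/clean.js` path is an assumption about where the file lives:

```bash
# Remove all build artifacts and node_modules across packages
node ./scripts/clean.js
```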