Merge branch 'main' into pr/5587

This commit is contained in:
Tomasz Stefaniak 2025-05-15 15:56:03 -07:00
commit acf1e30900
97 changed files with 2295 additions and 973 deletions

View File

@ -12,6 +12,6 @@
[ For visual changes, include screenshots. Screen recordings are particularly helpful, and appreciated! ]
## Testing instructions
## Tests
[ For new or modified features, provide step-by-step testing instructions to validate the intended behavior of the change, including any relevant tests to run. ]
[ What tests were added or updated to ensure the changes work as expected? ]

29
.github/workflows/cla.yaml vendored Normal file
View File

@ -0,0 +1,29 @@
name: "CLA Assistant"
on:
issue_comment:
types: [ created ]
pull_request_target:
types: [ opened, closed, synchronize ]
permissions:
actions: write
contents: write
pull-requests: write
statuses: write
jobs:
CLAAssistant:
runs-on: ubuntu-latest
# Only run this workflow on the main repository (continuedev/continue)
if: github.repository == 'continuedev/continue'
steps:
- name: "CLA Assistant"
if: (contains(github.event.comment.body, 'recheck') || contains(github.event.comment.body, 'I have read the CLA Document and I hereby sign the CLA')) || github.event_name == 'pull_request_target'
uses: contributor-assistant/github-action@v2.6.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
path-to-signatures: "signatures/version1/cla.json"
path-to-document: "https://github.com/continuedev/continue/blob/main/docs/docs/CLA.md"
branch: cla-signatures
allowlist: dependabot[bot]

View File

@ -63,7 +63,7 @@ jobs:
- name: Bump version in gradle.properties
run: |
cd extensions/intelllij
cd extensions/intellij
awk '/pluginVersion=/{split($0,a,"="); split(a[2],b,"."); b[3]+=1; printf "%s=%s.%s.%s\n",a[1],b[1],b[2],b[3];next}1' gradle.properties > tmp && mv tmp gradle.properties
rm -rf tmp
NEW_VERSION=$(grep 'pluginVersion=' gradle.properties | cut -d'=' -f2)

View File

@ -235,7 +235,7 @@ jobs:
- name: Publish to VS Code Marketplace
run: |
cd extensions/vscode
npx vsce publish --packagePath ../../vsix-artifacts/*.vsix
npx @vscode/vsce publish --packagePath ../../vsix-artifacts/*.vsix
env:
VSCE_PAT: ${{ secrets.VSCE_TOKEN }}

View File

@ -235,10 +235,11 @@ jobs:
cd packages/config-yaml
npm run build
- name: Type check
- name: Type check and lint
run: |
cd gui
npx tsc --noEmit
npm run lint
binary-checks:
needs: [install-root, install-core, install-config-yaml]
@ -670,13 +671,6 @@ jobs:
cd extensions/vscode
rm -rf e2e/.test-extensions/continue*
- name: Upload e2e test screenshots
if: failure()
uses: actions/upload-artifact@v4
with:
name: e2e-failure-screenshots
path: extensions/vscode/e2e/storage/screenshots
- name: Sanitize test file name
id: sanitize_filename
if: always()
@ -685,6 +679,13 @@ jobs:
SANITIZED_FILENAME="${FILENAME//\//-}" # Replace / with - using bash parameter expansion
echo "sanitized_test_file=${SANITIZED_FILENAME}" >> $GITHUB_OUTPUT
- name: Upload e2e test screenshots
if: failure()
uses: actions/upload-artifact@v4
with:
name: e2e-failure-screenshots-${{ steps.sanitize_filename.outputs.sanitized_test_file || 'unknown' }}-${{ matrix.command == 'e2e:ci:run-yaml' && 'yaml' || 'json' }}
path: extensions/vscode/e2e/storage/screenshots
- name: Find e2e log file
if: always()
run: |

View File

@ -232,7 +232,7 @@ jobs:
- name: Publish to VS Code Marketplace
run: |
cd extensions/vscode
npx vsce publish --pre-release --packagePath ../../vsix-artifacts/*.vsix
npx @vscode/vsce publish --pre-release --packagePath ../../vsix-artifacts/*.vsix
env:
VSCE_PAT: ${{ secrets.VSCE_TOKEN }}

View File

@ -0,0 +1,22 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="config-yaml tests" type="JavaScriptTestRunnerJest">
<config-file value="$PROJECT_DIR$/packages/config-yaml/jest.config.mjs" />
<node-interpreter value="project" />
<node-options value="--experimental-vm-modules" />
<jest-package value="$PROJECT_DIR$/binary/node_modules/jest" />
<working-dir value="$PROJECT_DIR$/packages/config-yaml" />
<envs />
<scope-kind value="ALL" />
<method v="2">
<option name="NpmBeforeRunTask" enabled="true">
<package-json value="$PROJECT_DIR$/packages/config-yaml/package.json" />
<command value="run" />
<scripts>
<script value="build" />
</scripts>
<node-interpreter value="project" />
<envs />
</option>
</method>
</configuration>
</component>

View File

@ -0,0 +1,12 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="core tests" type="JavaScriptTestRunnerJest">
<config-file value="$PROJECT_DIR$/core/jest.config.js" />
<node-interpreter value="project" />
<node-options value="--experimental-vm-modules" />
<jest-package value="$PROJECT_DIR$/binary/node_modules/jest" />
<working-dir value="$PROJECT_DIR$/core" />
<envs />
<scope-kind value="ALL" />
<method v="2" />
</configuration>
</component>

View File

@ -0,0 +1,25 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="openai-adapters configuration" type="JavaScriptTestRunnerJest">
<node-interpreter value="project" />
<node-options value="--experimental-vm-modules" />
<jest-package value="$PROJECT_DIR$/binary/node_modules/jest" />
<working-dir value="$PROJECT_DIR$/packages/openai-adapters" />
<envs />
<scope-kind value="SUITE" />
<test-file value="$PROJECT_DIR$/packages/openai-adapters/src/test/main.test.ts" />
<test-names>
<test-name value="openai configuration" />
</test-names>
<method v="2">
<option name="NpmBeforeRunTask" enabled="true">
<package-json value="$PROJECT_DIR$/packages/openai-adapters/package.json" />
<command value="run" />
<scripts>
<script value="build" />
</scripts>
<node-interpreter value="project" />
<envs />
</option>
</method>
</configuration>
</component>

View File

@ -1,5 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CommitMessageInspectionProfile">
<profile version="1.0">
<inspection_tool class="CommitFormat" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="CommitNamingConvention" enabled="true" level="WARNING" enabled_by_default="true" />
</profile>
</component>
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>

View File

@ -5,6 +5,7 @@
"dbaeumer.vscode-eslint",
"esbenp.prettier-vscode",
"bradlc.vscode-tailwindcss",
"YoavBls.pretty-ts-errors"
"YoavBls.pretty-ts-errors",
"github.vscode-github-actions"
]
}

View File

@ -3,44 +3,49 @@
## Table of Contents
- [Contributing to Continue](#contributing-to-continue)
- [Table of Contents](#table-of-contents)
- [Table of Contents](#table-of-contents)
- [❤️ Ways to Contribute](#-ways-to-contribute)
- [👋 Continue Contribution Ideas](#-continue-contribution-ideas)
- [🐛 Report Bugs](#-report-bugs)
- [✨ Suggest Enhancements](#-suggest-enhancements)
- [📖 Updating / Improving Documentation](#-updating--improving-documentation)
- [Running the Documentation Server Locally](#running-the-documentation-server-locally)
- [Method 1: NPM Script](#method-1-npm-script)
- [Method 2: VS Code Task](#method-2-vs-code-task)
- [🧑‍💻 Contributing Code](#-contributing-code)
- [Environment Setup](#environment-setup)
- [Pre-requisites](#pre-requisites)
- [Fork the Continue Repository](#fork-the-continue-repository)
- [VS Code](#vs-code)
- [Debugging](#debugging)
- [JetBrains](#jetbrains)
- [Our Git Workflow](#our-git-workflow)
- [Development workflow](#development-workflow)
- [Formatting](#formatting)
- [Testing](#testing)
- [Review Process](#review-process)
- [Getting help](#getting-help)
- [Contribtuing new LLM Providers/Models](#contribtuing-new-llm-providersmodels)
- [Adding an LLM Provider](#adding-an-llm-provider)
- [Adding Models](#adding-models)
- [📐 Continue Architecture](#-continue-architecture)
- [Continue VS Code Extension](#continue-vs-code-extension)
- [Continue JetBrains Extension](#continue-jetbrains-extension)
- [👋 Continue Contribution Ideas](#-continue-contribution-ideas)
- [🐛 Report Bugs](#-report-bugs)
- [✨ Suggest Enhancements](#-suggest-enhancements)
- [📖 Updating / Improving Documentation](#-updating--improving-documentation)
- [Running the Documentation Server Locally](#running-the-documentation-server-locally)
- [Method 1: NPM Script](#method-1-npm-script)
- [Method 2: VS Code Task](#method-2-vs-code-task)
- [🧑‍💻 Contributing Code](#-contributing-code)
- [Environment Setup](#environment-setup)
- [Pre-requisites](#pre-requisites)
- [Fork the Continue Repository](#fork-the-continue-repository)
- [VS Code](#vs-code)
- [Debugging](#debugging)
- [JetBrains](#jetbrains)
- [Our Git Workflow](#our-git-workflow)
- [Development workflow](#development-workflow)
- [Formatting](#formatting)
- [Testing](#testing)
- [Review Process](#review-process)
- [Getting help](#getting-help)
- [Contribtuing new LLM Providers/Models](#contribtuing-new-llm-providersmodels)
- [Adding an LLM Provider](#adding-an-llm-provider)
- [Adding Models](#adding-models)
- [📐 Continue Architecture](#-continue-architecture)
- [Continue VS Code Extension](#continue-vs-code-extension)
- [Continue JetBrains Extension](#continue-jetbrains-extension)
- [Contibutor License Agreement](#contributor-license-agreement-cla)
# ❤️ Ways to Contribute
## 👋 Continue Contribution Ideas
[This GitHub project board](https://github.com/orgs/continuedev/projects/2) is a list of ideas for how you can contribute to Continue. These aren't the only ways, but are a great starting point if you are new to the project.
[This GitHub project board](https://github.com/orgs/continuedev/projects/2) is a list of ideas for how you can
contribute to Continue. These aren't the only ways, but are a great starting point if you are new to the project. You
can also browse the list
of [good first issues](https://github.com/continuedev/continue/issues?q=is:issue%20state:open%20label:good-first-issue).
## 🐛 Report Bugs
If you find a bug, please [create an issue](https://github.com/continuedev/continue/issues) to report it! A great bug report includes:
If you find a bug, please [create an issue](https://github.com/continuedev/continue/issues) to report it! A great bug
report includes:
- A description of the bug
- Steps to reproduce
@ -50,19 +55,22 @@ If you find a bug, please [create an issue](https://github.com/continuedev/conti
## ✨ Suggest Enhancements
Continue is quickly adding features, and we'd love to hear which are the most important to you. The best ways to suggest an enhancement are:
Continue is quickly adding features, and we'd love to hear which are the most important to you. The best ways to suggest
an enhancement are:
- Create an issue
- First, check whether a similar proposal has already been made
- If not, [create an issue](https://github.com/continuedev/continue/issues)
- Please describe the enhancement in as much detail as you can, and why it would be useful
- First, check whether a similar proposal has already been made
- If not, [create an issue](https://github.com/continuedev/continue/issues)
- Please describe the enhancement in as much detail as you can, and why it would be useful
- Join the [Continue Discord](https://discord.gg/NWtdYexhMs) and tell us about your idea in the `#feedback` channel
## 📖 Updating / Improving Documentation
Continue is continuously improving, but a feature isn't complete until it is reflected in the documentation! If you see something out-of-date or missing, you can help by clicking "Edit this page" at the bottom of any page on [docs.continue.dev](https://docs.continue.dev).
Continue is continuously improving, but a feature isn't complete until it is reflected in the documentation! If you see
something out-of-date or missing, you can help by clicking "Edit this page" at the bottom of any page
on [docs.continue.dev](https://docs.continue.dev).
### Running the Documentation Server Locally
@ -70,7 +78,8 @@ You can run the documentation server locally using either of the following metho
#### Method 1: NPM Script
1. Open your terminal and navigate to the `docs` subdirectory of the project. The `docusaurus.config.js` file you'll see there is a sign you're in the right place.
1. Open your terminal and navigate to the `docs` subdirectory of the project. The `docusaurus.config.js` file you'll see
there is a sign you're in the right place.
2. Run the following command to install the necessary dependencies for the documentation server:
@ -92,17 +101,22 @@ You can run the documentation server locally using either of the following metho
3. Look for the `docs:start` task and select it.
This will start a local server and you can see the documentation rendered in your default browser, typically accessible at `http://localhost:3000`.
This will start a local server and you can see the documentation rendered in your default browser, typically accessible
at `http://localhost:3000`.
## 🧑‍💻 Contributing Code
We welcome contributions from developers of all experience levels - from first-time contributors to seasoned open source maintainers. While we aim to maintain high standards for reliability and maintainability, our goal is to keep the process as welcoming and straightforward as possible.
We welcome contributions from developers of all experience levels - from first-time contributors to seasoned open source
maintainers. While we aim to maintain high standards for reliability and maintainability, our goal is to keep the
process as welcoming and straightforward as possible.
### Environment Setup
#### Pre-requisites
You should have Node.js version 20.19.0 (LTS) or higher installed. You can get it on [nodejs.org](https://nodejs.org/en/download) or, if you are using NVM (Node Version Manager), you can set the correct version of Node.js for this project by running the following command in the root of the project:
You should have Node.js version 20.19.0 (LTS) or higher installed. You can get it
on [nodejs.org](https://nodejs.org/en/download) or, if you are using NVM (Node Version Manager), you can set the correct
version of Node.js for this project by running the following command in the root of the project:
```bash
nvm use
@ -114,32 +128,39 @@ nvm use
2. Clone your forked repository to your local machine. Use: `git clone https://github.com/YOUR_USERNAME/continue.git`
3. Navigate to the cloned directory and make sure you are on the main branch. Create your feature/fix branch from there, like so: `git checkout -b 123-my-feature-branch`
3. Navigate to the cloned directory and make sure you are on the main branch. Create your feature/fix branch from there,
like so: `git checkout -b 123-my-feature-branch`
4. Send your pull request to the main branch.
#### VS Code
1. Open the VS Code command pallet (`cmd/ctrl+shift+p`) and select `Tasks: Run Task` and then select `install-all-dependencies`
1. Open the VS Code command pallet (`cmd/ctrl+shift+p`) and select `Tasks: Run Task` and then select
`install-all-dependencies`
2. Start debugging:
1. Switch to Run and Debug view
2. Select `Launch extension` from drop down
3. Hit play button
4. This will start the extension in debug mode and open a new VS Code window with it installed
1. The new VS Code window with the extension is referred to as the _Host VS Code_
2. The window you started debugging from is referred to as the _Main VS Code_
1. Switch to Run and Debug view
2. Select `Launch extension` from drop down
3. Hit play button
4. This will start the extension in debug mode and open a new VS Code window with it installed
1. The new VS Code window with the extension is referred to as the _Host VS Code_
2. The window you started debugging from is referred to as the _Main VS Code_
3. To package the extension, run `npm run package` in the `extensions/vscode` directory, select `Tasks: Run Task` and then select `vscode-extension:package`. This will generate `extensions/vscode/build/continue-{VERSION}.vsix`, which you can install by right-clicking and selecting "Install Extension VSIX".
3. To package the extension, run `npm run package` in the `extensions/vscode` directory, select `Tasks: Run Task` and
then select `vscode-extension:package`. This will generate `extensions/vscode/build/continue-{VERSION}.vsix`, which
you can install by right-clicking and selecting "Install Extension VSIX".
##### Debugging
**Breakpoints** can be used in both the `core` and `extensions/vscode` folders while debugging, but are not currently supported inside of `gui` code.
**Breakpoints** can be used in both the `core` and `extensions/vscode` folders while debugging, but are not currently
supported inside of `gui` code.
**Hot-reloading** is enabled with Vite, so if you make any changes to the `gui`, they should be automatically reflected without rebuilding. In some cases, you may need to refresh the _Host VS Code_ window to see the changes.
**Hot-reloading** is enabled with Vite, so if you make any changes to the `gui`, they should be automatically reflected
without rebuilding. In some cases, you may need to refresh the _Host VS Code_ window to see the changes.
Similarly, any changes to `core` or `extensions/vscode` will be automatically included by just reloading the _Host VS Code_ window with cmd/ctrl+shift+p "Reload Window".
Similarly, any changes to `core` or `extensions/vscode` will be automatically included by just reloading the _Host VS
Code_ window with cmd/ctrl+shift+p "Reload Window".
#### JetBrains
@ -147,11 +168,20 @@ See [`intellij/CONTRIBUTING.md`](./extensions/intellij/CONTRIBUTING.md) for the
### Our Git Workflow
We keep a single permanent branch: `main`. When we are ready to create a "pre-release" version, we create a tag on the `main` branch titled `v0.9.x-vscode`, which automatically triggers the workflow in [preview.yaml](./.github/workflows/preview.yaml), which builds and releases a version of the VS Code extension. When a release has been sufficiently tested, we will create a new release titled `v0.8.x-vscode`, triggering a similar workflow in [main.yaml](./.github/workflows/main.yaml), which will build and release a main release of the VS Code extension. Any hotfixes can be made by creating a feature branch from the tag for the release in question. This workflow is well explained by <http://releaseflow.org>.
We keep a single permanent branch: `main`. When we are ready to create a "pre-release" version, we create a tag on the
`main` branch titled `v1.1.x-vscode`, which automatically triggers the workflow
in [preview.yaml](./.github/workflows/preview.yaml), which builds and releases a version of the VS Code extension. When
a release has been sufficiently tested, we will create a new release titled `v1.0x-vscode`, triggering a similar
workflow in [main.yaml](./.github/workflows/main.yaml), which will build and release a main release of the VS Code
extension. Any hotfixes can be made by creating a feature branch from the tag for the release in question. This workflow
is well explained by <http://releaseflow.org>.
### Development workflow
### What makes a good PR?
- Open a new issue or comment on an existing one before writing code. This ensures your proposed changes are aligned with the project direction.
To keep the Continue codebase clean and maintainable, we expect the following from our own team and all contributors:
- Open a new issue or comment on an existing one before writing code. This ensures your proposed changes are aligned
with the project direction
- Keep changes focused. Multiple unrelated fixes should be opened as separate PRs
- Write or update tests for new functionality
- Update relevant documentation in the `docs` folder
@ -159,18 +189,22 @@ We keep a single permanent branch: `main`. When we are ready to create a "pre-re
### Formatting
Continue uses [Prettier](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode) to format JavaScript/TypeScript. Please install the Prettier extension in VS Code and enable "Format on Save" in your settings.
Continue uses [Prettier](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode) to format
JavaScript/TypeScript. Please install the Prettier extension in VS Code and enable "Format on Save" in your settings.
### Testing
We have a mix of unit, functional, and e2e test suites, with a primary focus on functional testing. These tests run on each pull request. If your PR causes one of these tests to fail, we will ask that you to resolve the issue before we merge.
We have a mix of unit, functional, and e2e test suites, with a primary focus on functional testing. These tests run on
each pull request. If your PR causes one of these tests to fail, we will ask that you to resolve the issue before we
merge.
When contributing, please update or create the appropriate tests to help verify the correctness of your implementaiton.
### Review Process
- **Initial Review** - A maintainer will be assigned as primary reviewer
- **Feedback Loop** - The reviewer may request changes. We value your work, but also want to ensure the code is maintainable and follows our patterns.
- **Feedback Loop** - The reviewer may request changes. We value your work, but also want to ensure the code is
maintainable and follows our patterns.
- **Approval & Merge** - Once the PR is approved, it will be merged into the `main` branch.
### Getting help
@ -181,42 +215,53 @@ Join [#contribute on Discord](https://discord.gg/vapESyrFmJ) to engage with main
### Adding an LLM Provider
Continue has support for more than a dozen different LLM "providers", making it easy to use models running on OpenAI, Ollama, Together, LM Studio, Msty, and more. You can find all of the existing providers [here](https://github.com/continuedev/continue/tree/main/core/llm/llms), and if you see one missing, you can add it with the following steps:
1. Create a new file in the `core/llm/llms` directory. The name of the file should be the name of the provider, and it should export a class that extends `BaseLLM`. This class should contain the following minimal implementation. We recommend viewing pre-existing providers for more details. The [LlamaCpp Provider](./core/llm/llms/LlamaCpp.ts) is a good simple example.
- `providerName` - the identifier for your provider.
- At least one of `_streamComplete` or `_streamChat` - This is the function that makes the request to the API and returns the streamed response. You only need to implement one because Continue can automatically convert between "chat" and "raw completion".
Continue has support for more than a dozen different LLM "providers", making it easy to use models running on OpenAI,
Ollama, Together, LM Studio, Msty, and more. You can find all of the existing
providers [here](https://github.com/continuedev/continue/tree/main/core/llm/llms), and if you see one missing, you can
add it with the following steps:
1. Create a new file in the `core/llm/llms` directory. The name of the file should be the name of the provider, and it
should export a class that extends `BaseLLM`. This class should contain the following minimal implementation. We
recommend viewing pre-existing providers for more details. The [LlamaCpp Provider](./core/llm/llms/LlamaCpp.ts) is a
good simple example.
2. Add your provider to the `LLMs` array in [core/llm/llms/index.ts](./core/llm/llms/index.ts).
3. If your provider supports images, add it to the `PROVIDER_SUPPORTS_IMAGES` array in [core/llm/autodetect.ts](./core/llm/autodetect.ts).
4. Add the necessary JSON Schema types to [`config_schema.json`](./extensions/vscode/config_schema.json). This makes sure that Intellisense shows users what options are available for your provider when they are editing `config.json`.
5. Add a documentation page for your provider in [`docs/docs/customize/model-providers`](./docs/docs/customize/model-providers). This should show an example of configuring your provider in `config.json` and explain what options are available.
3. If your provider supports images, add it to the `PROVIDER_SUPPORTS_IMAGES` array
in [core/llm/autodetect.ts](./core/llm/autodetect.ts).
4. Add a documentation page for your provider in [
`docs/docs/customize/model-providers/more`](./docs/docs/customize/model-providers/more). This should show an example
of configuring your provider in `config.yaml` and explain what options are available.
### Adding Models
While any model that works with a supported provider can be used with Continue, we keep a list of recommended models that can be automatically configured from the UI or `config.json`. The following files should be updated when adding a model:
While any model that works with a supported provider can be used with Continue, we keep a list of recommended models
that can be automatically configured from the UI or `config.json`. The following files should be updated when adding a
model:
- [config_schema.json](./extensions/vscode/config_schema.json) - This is the JSON Schema definition that is used to validate `config.json`. You'll notice a number of rules defined in "definitions.ModelDescription.allOf". Here is where you write rules that can specify something like "for the provider 'anthropic', only models 'claude-2' and 'claude-instant-1' are allowed. Look through all of these rules and make sure that your model is included for providers that support it.
- [AddNewModel page](./gui/src/pages/AddNewModel) - This directory defines which model options are shown in the side bar model selection UI. To add a new model:
1. Add a `ModelPackage` entry for the model into [configs/models.ts](./gui/src/pages/AddNewModel/configs/models.ts), following the lead of the many examples near the top of the file
2. Add the model within its provider's array to [AddNewModel.tsx](./gui/src/pages/AddNewModel/AddNewModel.tsx) (add provider if needed)
- [index.d.ts](./core/index.d.ts) - This file defines the TypeScript types used throughout Continue. You'll find a `ModelName` type. Be sure to add the name of your model to this.
- LLM Providers: Since many providers use their own custom strings to identify models, you'll have to add the translation from Continue's model name (the one you added to `index.d.ts`) and the model string for each of these providers: [Ollama](./core/llm/llms/Ollama.ts), [Together](./core/llm/llms/Together.ts), and [Replicate](./core/llm/llms/Replicate.ts). You can find their full model lists here: [Ollama](https://ollama.ai/library), [Together](https://docs.together.ai/docs/inference-models), [Replicate](https://replicate.com/collections/streaming-language-models).
- [Prompt Templates](./core/llm/index.ts) - In this file you'll find the `autodetectTemplateType` function. Make sure that for the model name you just added, this function returns the correct template type. This is assuming that the chat template for that model is already built in Continue. If not, you will have to add the template type and corresponding edit and chat templates.
- [AddNewModel page](./gui/src/pages/AddNewModel/configs/) - This directory defines which model options are shown in the
side bar model selection UI. To add a new model:
1. Add a `ModelPackage` entry for the model into [configs/models.ts](./gui/src/pages/AddNewModel/configs/models.ts),
following the lead of the many examples near the top of the file
2. Add the model within its provider's array
to [configs/providers.ts](./gui/src/pages/AddNewModel/configs/providers.ts) (add provider if needed)
- LLM Providers: Since many providers use their own custom strings to identify models, you'll have to add the
translation from Continue's model name (the one you added to `index.d.ts`) and the model string for each of these
providers: [Ollama](./core/llm/llms/Ollama.ts), [Together](./core/llm/llms/Together.ts),
and [Replicate](./core/llm/llms/Replicate.ts). You can find their full model lists
here: [Ollama](https://ollama.ai/library), [Together](https://docs.together.ai/docs/inference-models), [Replicate](https://replicate.com/collections/streaming-language-models).
- [Prompt Templates](./core/llm/autodetect.ts) - In this file you'll find the `autodetectTemplateType` function. Make
sure that for the model name you just added, this function returns the correct template type. This is assuming that
the chat template for that model is already built in Continue. If not, you will have to add the template type and
corresponding edit and chat templates.
## 📐 Continue Architecture
## Contributor License Agreement (CLA)
Continue consists of 2 parts that are split so that it can be extended to work in other IDEs as easily as possible:
We require all contributors to accept the CLA and have made it as easy as commenting on your PR:
1. **Continue GUI** - The Continue GUI is a React application that gives the user control over Continue. It displays the current chat history, allows the user to ask questions, invoke slash commands, and use context providers. The GUI also handles most state and holds as much of the logic as possible so that it can be reused between IDEs.
1. Open your pull request.
2. Paste the following comment (or reply `recheck` if youve signed before):
2. **Continue Extension** - The Continue Extension is a plugin for the IDE which implements the [IDE Interface](./core/index.d.ts#L229). This allows the GUI to request information from or actions to be taken within the IDE. This same interface is used regardless of IDE. The first Continue extensions we have built are for VS Code and JetBrains, but we plan to build clients for other IDEs in the future. The IDE Client must 1. implement IDE Interface, as is done [here](./extensions/vscode/src/VsCodeIde.ts) for VS Code and 2. display the Continue GUI in a sidebar, like [here](./extensions/vscode/src/ContinueGUIWebviewViewProvider.ts).
```text
I have read the CLA Document and I hereby sign the CLA
```
### Continue VS Code Extension
The starting point for the VS Code extension is [activate.ts](./extensions/vscode/src/activation/activate.ts). The `activateExtension` function here will register all commands and load the Continue GUI in the sidebar of the IDE as a webview.
### Continue JetBrains Extension
The JetBrains extension is currently in alpha testing. Please reach out on [Discord](https://discord.gg/vapESyrFmJ) if you are interested in contributing to its development.
3. The CLAAssistant bot records your signature in the repo and marks the status check as passed.

View File

@ -8,13 +8,15 @@ const {
execCmdSync,
autodetectPlatformAndArch,
} = require("../scripts/util");
const { downloadRipgrep } = require("./utils/ripgrep");
const { ALL_TARGETS, TARGET_TO_LANCEDB } = require("./utils/targets");
const bin = path.join(__dirname, "bin");
const out = path.join(__dirname, "out");
const build = path.join(__dirname, "build");
function cleanSlate() {
// Clean slate
// Clean slate
rimrafSync(bin);
rimrafSync(out);
rimrafSync(build);
@ -25,13 +27,7 @@ function cleanSlate() {
}
const esbuildOutputFile = "out/index.js";
let targets = [
"darwin-x64",
"darwin-arm64",
"linux-x64",
"linux-arm64",
"win32-x64",
];
let targets = [...ALL_TARGETS];
const [currentPlatform, currentArch] = autodetectPlatformAndArch();
@ -50,15 +46,6 @@ for (let i = 2; i < process.argv.length; i++) {
}
}
const targetToLanceDb = {
"darwin-arm64": "@lancedb/vectordb-darwin-arm64",
"darwin-x64": "@lancedb/vectordb-darwin-x64",
"linux-arm64": "@lancedb/vectordb-linux-arm64-gnu",
"linux-x64": "@lancedb/vectordb-linux-x64-gnu",
"win32-x64": "@lancedb/vectordb-win32-x64-msvc",
"win32-arm64": "@lancedb/vectordb-win32-arm64-msvc",
};
// Bundles the extension into one file
async function buildWithEsbuild() {
console.log("[info] Building with esbuild...");
@ -89,7 +76,6 @@ async function buildWithEsbuild() {
inject: ["./importMetaUrl.js"],
define: { "import.meta.url": "importMetaUrl" },
});
}
async function installNodeModuleInTempDirAndCopyToCurrent(packageName, toCopy) {
@ -153,13 +139,31 @@ async function installNodeModuleInTempDirAndCopyToCurrent(packageName, toCopy) {
}
}
/**
* Downloads and installs ripgrep binaries for the specified target
*
* @param {string} target - Target platform-arch (e.g., 'darwin-x64')
* @param {string} targetDir - Directory to install ripgrep to
* @returns {Promise<void>}
*/
/**
 * Fetches and installs the ripgrep binary for one build target,
 * logging progress and surfacing any failure to the caller.
 *
 * @param {string} target - Target platform-arch (e.g., 'darwin-x64')
 * @param {string} targetDir - Directory to install ripgrep to
 * @returns {Promise<void>}
 */
async function downloadRipgrepForTarget(target, targetDir) {
  console.log(`[info] Downloading ripgrep for ${target}...`);
  try {
    await downloadRipgrep(target, targetDir);
  } catch (error) {
    // Log with context, then rethrow so the build aborts for this target.
    console.error(`[error] Failed to download ripgrep for ${target}:`, error);
    throw error;
  }
  // Only reached on success — the error path above rethrows first.
  console.log(`[info] Successfully installed ripgrep for ${target}`);
}
(async () => {
if (esbuildOnly) {
await buildWithEsbuild();
return;
}
cleanSlate()
cleanSlate();
// Informs of where to look for node_sqlite3.node https://www.npmjs.com/package/bindings#:~:text=The%20searching%20for,file%20is%20found
// This is only needed for our `pkg` command at build time
@ -179,10 +183,10 @@ async function installNodeModuleInTempDirAndCopyToCurrent(packageName, toCopy) {
console.log("[info] Downloading prebuilt lancedb...");
for (const target of targets) {
if (targetToLanceDb[target]) {
if (TARGET_TO_LANCEDB[target]) {
console.log(`[info] Downloading for ${target}...`);
await installNodeModuleInTempDirAndCopyToCurrent(
targetToLanceDb[target],
TARGET_TO_LANCEDB[target],
"@lancedb",
);
}
@ -290,10 +294,13 @@ async function installNodeModuleInTempDirAndCopyToCurrent(packageName, toCopy) {
// copy @lancedb to bin folders
console.log("[info] Copying @lancedb files to bin");
fs.copyFileSync(
`node_modules/${targetToLanceDb[target]}/index.node`,
`node_modules/${TARGET_TO_LANCEDB[target]}/index.node`,
`${targetDir}/index.node`,
);
// Download and install ripgrep for the target
await downloadRipgrepForTarget(target, targetDir);
// Informs the `continue-binary` of where to look for node_sqlite3.node
// https://www.npmjs.com/package/bindings#:~:text=The%20searching%20for,file%20is%20found
fs.writeFileSync(`${targetDir}/package.json`, "");
@ -310,6 +317,7 @@ async function installNodeModuleInTempDirAndCopyToCurrent(packageName, toCopy) {
`${targetDir}/continue-binary${exe}`,
`${targetDir}/index.node`, // @lancedb
`${targetDir}/build/Release/node_sqlite3.node`,
`${targetDir}/rg${exe}`, // ripgrep binary
);
}

View File

@ -10,6 +10,7 @@
"license": "Apache-2.0",
"dependencies": {
"@octokit/rest": "^20.0.2",
"adm-zip": "^0.5.16",
"commander": "^12.0.0",
"core": "file:../core",
"follow-redirects": "^1.15.5",
@ -18,6 +19,7 @@
"node-fetch": "^3.3.2",
"posthog-node": "^3.6.3",
"system-ca": "^1.0.2",
"tar": "^7.4.3",
"uuid": "^9.0.1",
"vectordb": "^0.4.20",
"win-ca": "^3.5.1"
@ -1527,6 +1529,18 @@
"node": ">=12"
}
},
"node_modules/@isaacs/fs-minipass": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz",
"integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==",
"license": "ISC",
"dependencies": {
"minipass": "^7.0.4"
},
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/@istanbuljs/load-nyc-config": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
@ -2435,6 +2449,15 @@
"ncc": "dist/ncc/cli.js"
}
},
"node_modules/adm-zip": {
"version": "0.5.16",
"resolved": "https://registry.npmjs.org/adm-zip/-/adm-zip-0.5.16.tgz",
"integrity": "sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ==",
"license": "MIT",
"engines": {
"node": ">=12.0"
}
},
"node_modules/agent-base": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
@ -5242,14 +5265,41 @@
}
},
"node_modules/minipass": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.1.tgz",
"integrity": "sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA==",
"dev": true,
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
"integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
"license": "ISC",
"engines": {
"node": ">=16 || 14 >=14.17"
}
},
"node_modules/minizlib": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz",
"integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==",
"license": "MIT",
"dependencies": {
"minipass": "^7.1.2"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/mkdirp": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz",
"integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==",
"license": "MIT",
"bin": {
"mkdirp": "dist/cjs/src/bin.js"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/mkdirp-classic": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
@ -5360,6 +5410,7 @@
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz",
"integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==",
"license": "MIT",
"dependencies": {
"data-uri-to-buffer": "^4.0.0",
"fetch-blob": "^3.1.4",
@ -6430,6 +6481,23 @@
"node": ">=12.17"
}
},
"node_modules/tar": {
"version": "7.4.3",
"resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz",
"integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==",
"license": "ISC",
"dependencies": {
"@isaacs/fs-minipass": "^4.0.0",
"chownr": "^3.0.0",
"minipass": "^7.1.2",
"minizlib": "^3.0.1",
"mkdirp": "^3.0.1",
"yallist": "^5.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/tar-fs": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.2.tgz",
@ -6473,6 +6541,24 @@
"node": ">= 6"
}
},
"node_modules/tar/node_modules/chownr": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz",
"integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==",
"license": "BlueOak-1.0.0",
"engines": {
"node": ">=18"
}
},
"node_modules/tar/node_modules/yallist": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz",
"integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==",
"license": "BlueOak-1.0.0",
"engines": {
"node": ">=18"
}
},
"node_modules/test-exclude": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",

View File

@ -43,6 +43,7 @@
},
"dependencies": {
"@octokit/rest": "^20.0.2",
"adm-zip": "^0.5.16",
"commander": "^12.0.0",
"core": "file:../core",
"follow-redirects": "^1.15.5",
@ -51,6 +52,7 @@
"node-fetch": "^3.3.2",
"posthog-node": "^3.6.3",
"system-ca": "^1.0.2",
"tar": "^7.4.3",
"uuid": "^9.0.1",
"vectordb": "^0.4.20",
"win-ca": "^3.5.1"

View File

@ -4,17 +4,35 @@ const path = require('path');
const logDirPath = path.join(__dirname, '..', 'extensions', '.continue-debug', 'logs');
const logFilePath = path.join(logDirPath, "prompt.log");
// Ensure the log directory exists
if (!fs.existsSync(logDirPath)) {
fs.mkdirSync(logDirPath, { recursive: true });
console.log("Created log directory at " + logDirPath);
}
// Create the log file if it doesn't exist
if (!fs.existsSync(logFilePath)) {
fs.createWriteStream(logFilePath).end();
fs.writeFileSync(logFilePath, ''); // Create an empty file synchronously
console.log("Created empty log file at " + logFilePath);
}
console.log("Watching logs at " + logFilePath)
console.log("Watching logs at " + logFilePath);
fs.watch(logFilePath, () => {
// console.clear();
fs.createReadStream(logFilePath).pipe(process.stdout);
});
// Set up the file watcher
try {
fs.watch(logFilePath, () => {
try {
// console.clear();
const stream = fs.createReadStream(logFilePath);
stream.pipe(process.stdout);
stream.on('error', (err) => {
console.error('Error reading log file:', err.message);
});
} catch (err) {
console.error('Error while handling file change:', err.message);
}
});
} catch (err) {
console.error('Error setting up file watcher:', err.message);
console.log('You may need to restart the script after the extension generates the first log entry.');
}

120
binary/utils/ripgrep.js Normal file
View File

@ -0,0 +1,120 @@
const fs = require("fs");
const path = require("path");
const { rimrafSync } = require("rimraf");
const tar = require("tar");
const { RIPGREP_VERSION, TARGET_TO_RIPGREP_RELEASE } = require("./targets");
const AdmZip = require("adm-zip");
const RIPGREP_BASE_URL = `https://github.com/BurntSushi/ripgrep/releases/download/${RIPGREP_VERSION}`;
/**
 * Downloads the resource at `url` and writes it to `destPath`.
 *
 * Uses the global fetch API (Node 18+) rather than node-fetch, and
 * buffers the whole response in memory before writing — fine for the
 * small ripgrep release archives this is used for.
 *
 * @param {string} url - The URL to download from
 * @param {string} destPath - The destination path for the downloaded file
 * @returns {Promise<void>}
 */
async function downloadFile(url, destPath) {
  // GitHub release asset URLs redirect to a CDN, so follow redirects.
  const response = await fetch(url, {
    redirect: "follow",
  });
  if (!response.ok) {
    throw new Error(`Failed to download file, status code: ${response.status}`);
  }
  const data = Buffer.from(await response.arrayBuffer());
  fs.writeFileSync(destPath, data);
}
/**
 * Extracts a ripgrep release archive into a target directory, keeping
 * only the `rg` / `rg.exe` binary.
 *
 * Windows releases ship as .zip (handled with adm-zip); all other
 * platforms ship as .tar.gz (handled with node-tar).
 *
 * @param {string} archivePath - Path to the archive file
 * @param {string} targetDir - Directory to extract the archive to
 * @param {string} platform - Platform identifier (e.g., 'darwin', 'linux', 'win32')
 * @returns {Promise<void>}
 */
async function extractArchive(archivePath, targetDir, platform) {
  if (platform === "win32" || archivePath.endsWith(".zip")) {
    // Simple zip extraction for Windows - extract rg.exe
    const zip = new AdmZip(archivePath);
    const rgEntry = zip
      .getEntries()
      .find((entry) => entry.entryName.endsWith("rg.exe"));
    if (!rgEntry) {
      throw new Error("Could not find rg.exe in the downloaded archive");
    }
    // Extract the found rg.exe file to the target directory
    const entryData = rgEntry.getData();
    fs.writeFileSync(path.join(targetDir, "rg.exe"), entryData);
  } else {
    await tar.extract({
      file: archivePath,
      cwd: targetDir,
      strip: 1, // Strip the top-level directory
      // Named `entryPath` (not `path`) so it doesn't shadow the `path`
      // module required at the top of this file. Matches the `rg` binary
      // inside the release's top-level directory.
      filter: (entryPath) => entryPath.endsWith("/rg"),
    });
  }
}
/**
 * Downloads and installs ripgrep for the specified target.
 *
 * The release archive is downloaded into a temporary subdirectory of
 * `targetDir`, extracted, made executable (on Unix-like platforms), and
 * the temp directory is always cleaned up, even on failure.
 *
 * @param {string} target - Target platform-arch (e.g., 'darwin-x64')
 * @param {string} targetDir - Directory to install ripgrep to
 * @returns {Promise<string>} - Path to the installed ripgrep binary
 */
async function downloadRipgrep(target, targetDir) {
  // Get the ripgrep release file name for the target
  const releaseFile = TARGET_TO_RIPGREP_RELEASE[target];
  if (!releaseFile) {
    throw new Error(`Unsupported target: ${target}`);
  }
  const platform = target.split("-")[0];
  const downloadUrl = `${RIPGREP_BASE_URL}/${releaseFile}`;
  const tempDir = path.join(targetDir, "temp");
  // Create temp directory
  fs.mkdirSync(tempDir, { recursive: true });
  const archivePath = path.join(tempDir, releaseFile);
  try {
    // Download the ripgrep release
    console.log(`[info] Downloading ripgrep from ${downloadUrl}`);
    await downloadFile(downloadUrl, archivePath);
    // Extract the archive
    console.log(`[info] Extracting ripgrep to ${targetDir}`);
    await extractArchive(archivePath, targetDir, platform);
    // Make the binary executable on Unix-like systems
    if (platform !== "win32") {
      const rgPath = path.join(targetDir, "rg");
      fs.chmodSync(rgPath, 0o755);
    }
    // Return the path to the ripgrep binary
    const binName = platform === "win32" ? "rg.exe" : "rg";
    return path.join(targetDir, binName);
  } catch (error) {
    console.error(`[error] Failed to download ripgrep for ${target}:`, error);
    throw error;
  } finally {
    // Single cleanup point for both success and failure paths
    // (previously duplicated in try and catch).
    rimrafSync(tempDir);
  }
}
module.exports = {
downloadRipgrep,
RIPGREP_VERSION,
};

42
binary/utils/targets.js Normal file
View File

@ -0,0 +1,42 @@
// Pinned ripgrep release used by binary/utils/ripgrep.js to build download URLs.
const RIPGREP_VERSION = "14.1.1";
/**
 * All supported platform-architecture targets
 */
const ALL_TARGETS = [
  "darwin-x64",
  "darwin-arm64",
  "linux-x64",
  "linux-arm64",
  "win32-x64",
  // NOTE(review): "win32-arm64" appears in TARGET_TO_LANCEDB below but not
  // here or in TARGET_TO_RIPGREP_RELEASE — confirm the omission is intentional.
];
/**
 * Mapping from target triplets to ripgrep release file names
 */
const TARGET_TO_RIPGREP_RELEASE = {
  "darwin-x64": `ripgrep-${RIPGREP_VERSION}-x86_64-apple-darwin.tar.gz`,
  "darwin-arm64": `ripgrep-${RIPGREP_VERSION}-aarch64-apple-darwin.tar.gz`,
  // NOTE(review): linux-x64 uses the musl build while linux-arm64 uses gnu —
  // confirm this matches the assets published for this ripgrep release.
  "linux-x64": `ripgrep-${RIPGREP_VERSION}-x86_64-unknown-linux-musl.tar.gz`,
  "linux-arm64": `ripgrep-${RIPGREP_VERSION}-aarch64-unknown-linux-gnu.tar.gz`,
  "win32-x64": `ripgrep-${RIPGREP_VERSION}-x86_64-pc-windows-msvc.zip`,
};
/**
 * Mapping from target triplets to LanceDB package names
 */
const TARGET_TO_LANCEDB = {
  "darwin-arm64": "@lancedb/vectordb-darwin-arm64",
  "darwin-x64": "@lancedb/vectordb-darwin-x64",
  "linux-arm64": "@lancedb/vectordb-linux-arm64-gnu",
  "linux-x64": "@lancedb/vectordb-linux-x64-gnu",
  "win32-x64": "@lancedb/vectordb-win32-x64-msvc",
  "win32-arm64": "@lancedb/vectordb-win32-arm64-msvc",
};
module.exports = {
  ALL_TARGETS,
  TARGET_TO_RIPGREP_RELEASE,
  TARGET_TO_LANCEDB,
  RIPGREP_VERSION,
};

View File

@ -72,7 +72,8 @@ export class StreamTransformPipeline {
fullStop,
);
const timeoutValue = helper.options.showWhateverWeHaveAtXMs;
const timeoutValue = helper.options.modelTimeout;
lineGenerator = showWhateverWeHaveAtXMs(lineGenerator, timeoutValue!);

View File

@ -2,6 +2,7 @@ import { jest } from "@jest/globals";
import * as lineStream from "./lineStream";
// eslint-disable-next-line max-lines-per-function
describe("lineStream", () => {
let mockFullStop: jest.Mock;
@ -204,6 +205,33 @@ describe("lineStream", () => {
});
describe("filterCodeBlockLines", () => {
it("should handle unfenced code", async () => {
const linesGenerator = await getLineGenerator(["const x = 5;"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;"]);
});
it("should handle unfenced code with a code block", async () => {
const linesGenerator = await getLineGenerator(["const x = 5;","```bash","ls -al","```"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;","```bash","ls -al","```"]);
});
it("should handle unfenced code with two code blocks", async () => {
const linesGenerator = await getLineGenerator(["const x = 5;","```bash","ls -al","```","```bash","ls -al","```"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;","```bash","ls -al","```","```bash","ls -al","```"]);
});
it("should remove lines before the first valid line", async () => {
const linesGenerator = await getLineGenerator(["```ts", "const x = 5;"]);
@ -213,7 +241,59 @@ describe("lineStream", () => {
expect(filteredLines).toEqual(["const x = 5;"]);
});
it.todo("Need some sample inputs to properly test this");
it("should remove outer blocks", async () => {
const linesGenerator = await getLineGenerator(["```ts", "const x = 5;","```"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;"]);
});
it("should leave inner blocks intact", async () => {
const linesGenerator = await getLineGenerator(["```md", "const x = 5;", "```bash","ls -al","```","```"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;","```bash","ls -al","```"]);
});
it("should handle included inner ticks", async () => {
const linesGenerator = await getLineGenerator(["```md", "const x = 5;", "```bash","echo ```test```","```","```"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;","```bash","echo ```test```","```"]);
});
it("should leave single inner blocks intact but not return trailing text", async () => {
const linesGenerator = await getLineGenerator(["```md", "const x = 5;", "```bash","ls -al","```","```","trailing text"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;","```bash","ls -al","```"]);
});
it("should leave double inner blocks intact but not return trailing text", async () => {
const linesGenerator = await getLineGenerator(["```md", "const x = 5;", "```bash","ls -al","```","const y = 10;","```sh","echo `hello world`","```","```","trailing text"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;","```bash","ls -al","```","const y = 10;","```sh","echo `hello world`","```"]);
});
it("should leave inner blocks intact but not return trailing or leading text", async () => {
const linesGenerator = await getLineGenerator(["[CODE]", "const x = 5;", "```bash","ls -al","```","[/CODE]","trailing text"]);
const result = lineStream.filterCodeBlockLines(linesGenerator);
const filteredLines = await getFilteredLines(result);
expect(filteredLines).toEqual(["const x = 5;","```bash","ls -al","```"]);
});
});
describe("filterEnglishLinesAtStart", () => {

View File

@ -55,8 +55,8 @@ function shouldChangeLineAndStop(line: string): string | undefined {
return line;
}
if (line.includes(CODE_START_BLOCK)) {
return line.split(CODE_START_BLOCK)[0].trimEnd();
if (line.includes(CODE_STOP_BLOCK)) {
return line.split(CODE_STOP_BLOCK)[0].trimEnd();
}
return undefined;
@ -71,9 +71,9 @@ function isUselessLine(line: string): boolean {
return hasUselessLine || trimmed.startsWith("// end");
}
export const USELESS_LINES = ["", "```"];
export const USELESS_LINES = [""];
export const CODE_KEYWORDS_ENDING_IN_SEMICOLON = ["def"];
export const CODE_START_BLOCK = "[/CODE]";
export const CODE_STOP_BLOCK = "[/CODE]";
export const BRACKET_ENDING_CHARS = [")", "]", "}", ";"];
export const PREFIXES_TO_SKIP = ["<COMPLETION>"];
export const LINES_TO_STOP_AT = ["# End of file.", "<STOP EDITING HERE"];
@ -343,34 +343,48 @@ export async function* removeTrailingWhitespace(
* 4. Yields processed lines that are part of the actual code block content.
*/
export async function* filterCodeBlockLines(rawLines: LineStream): LineStream {
let seenValidLine = false;
let waitingToSeeIfLineIsLast = undefined;
let seenFirstFence = false;
// nestCount is set to 1 when the entire code block is wrapped with ``` or START blocks. It's then incremented
// when an inner code block is discovered to avoid exiting the function prematurly. The function will exit early
// when all blocks are matched. When no outer fence is discovered the function will always continue to the end.
let nestCount = 0;
for await (const line of rawLines) {
// Filter out starting ```
if (!seenValidLine) {
if (!seenFirstFence) {
if (shouldRemoveLineBeforeStart(line)) {
continue;
// Filter out starting ``` or START block
continue
}
seenValidLine = true;
// Regardless of a fence or START block start tracking the nesting level
seenFirstFence = true;
nestCount = 1;
}
// Filter out ending ```
if (typeof waitingToSeeIfLineIsLast !== "undefined") {
yield waitingToSeeIfLineIsLast;
waitingToSeeIfLineIsLast = undefined;
}
const changedEndLine = shouldChangeLineAndStop(line);
if (typeof changedEndLine === "string") {
yield changedEndLine;
return;
}
if (line.startsWith("```")) {
waitingToSeeIfLineIsLast = line;
} else {
yield line;
if (nestCount > 0) {
// Inside a block including the outer block
const changedEndLine = shouldChangeLineAndStop(line);
if (typeof changedEndLine === "string") {
// Ending a block with just backticks (```) or STOP
nestCount--;
if (nestCount === 0) {
// if we are closing the outer block then exit early
// only exit early if the outer block was started with a block
// if it was text, we will never exit early
return;
} else {
// otherwise just yield the line
yield line;
}
} else if (line.startsWith("```")) {
// Going into a nested codeblock
nestCount++;
yield line;
} else {
// otherwise just yield the line
yield line;
}
}
}
}

View File

@ -1,32 +1,32 @@
import { v4 as uuidv4 } from "uuid";
export class AutocompleteDebouncer {
private debounceTimeout: NodeJS.Timeout | undefined = undefined;
private debouncing = false;
private lastUUID: string | undefined = undefined;
private currentRequestId: string | undefined = undefined;
async delayAndShouldDebounce(debounceDelay: number): Promise<boolean> {
// Debounce
const uuid = uuidv4();
this.lastUUID = uuid;
// Debounce
if (this.debouncing) {
this.debounceTimeout?.refresh();
const lastUUID = await new Promise((resolve) =>
setTimeout(() => {
resolve(this.lastUUID);
}, debounceDelay),
);
if (uuid !== lastUUID) {
return true;
}
} else {
this.debouncing = true;
this.debounceTimeout = setTimeout(async () => {
this.debouncing = false;
}, debounceDelay);
// Generate a unique ID for this request
const requestId = uuidv4();
this.currentRequestId = requestId;
// Clear any existing timeout
if (this.debounceTimeout) {
clearTimeout(this.debounceTimeout);
}
return false;
// Create a new promise that resolves after the debounce delay
return new Promise<boolean>((resolve) => {
this.debounceTimeout = setTimeout(() => {
// When the timeout completes, check if this is still the most recent request
const shouldDebounce = this.currentRequestId !== requestId;
// If this is the most recent request, it shouldn't be debounced
if (!shouldDebounce) {
this.currentRequestId = undefined;
}
resolve(shouldDebounce);
}, debounceDelay);
});
}
}

View File

@ -255,7 +255,7 @@ export class ConfigHandler {
if (currentProfile) {
this.globalContext.update("lastSelectedProfileForWorkspace", {
...selectedProfiles,
[profileKey]: selectedProfiles.id ?? null,
[profileKey]: currentProfile.profileDescription.id,
});
}

View File

@ -31,6 +31,8 @@ export const sharedConfigSchema = z
useAutocompleteCache: z.boolean(),
useAutocompleteMultilineCompletions: z.enum(["always", "never", "auto"]),
disableAutocompleteInFiles: z.array(z.string()),
modelTimeout: z.number(),
debounceDelay: z.number(),
})
.partial();
@ -103,6 +105,14 @@ export function modifyAnyConfigWithSharedConfig<
configCopy.tabAutocompleteOptions.disableInFiles =
sharedConfig.disableAutocompleteInFiles;
}
if (sharedConfig.modelTimeout !== undefined) {
configCopy.tabAutocompleteOptions.modelTimeout =
sharedConfig.modelTimeout;
}
if (sharedConfig.debounceDelay !== undefined) {
configCopy.tabAutocompleteOptions.debounceDelay =
sharedConfig.debounceDelay;
}
configCopy.ui = {
...configCopy.ui,

View File

@ -9,6 +9,7 @@ import {
PackageIdentifier,
RegistryClient,
Rule,
TEMPLATE_VAR_REGEX,
unrollAssistant,
validateConfigYaml,
} from "@continuedev/config-yaml";
@ -72,7 +73,7 @@ function convertYamlMcpToContinueMcp(
args: server.args ?? [],
env: server.env,
},
timeout: server.connectionTimeout
timeout: server.connectionTimeout,
};
}
@ -241,6 +242,21 @@ async function configYamlToContinueConfig(options: {
rootUrl: doc.rootUrl,
faviconUrl: doc.faviconUrl,
}));
config.mcpServers?.forEach((mcpServer) => {
const mcpArgVariables =
mcpServer.args?.filter((arg) => TEMPLATE_VAR_REGEX.test(arg)) ?? [];
if (mcpArgVariables.length === 0) {
return;
}
localErrors.push({
fatal: false,
message: `MCP server "${mcpServer.name}" has unsubstituted variables in args: ${mcpArgVariables.join(", ")}. Please refer to https://docs.continue.dev/hub/secrets/secret-types for managing hub secrets.`,
});
});
continueConfig.experimental = {
modelContextProtocolServers: config.mcpServers?.map(
convertYamlMcpToContinueMcp,
@ -445,7 +461,7 @@ async function configYamlToContinueConfig(options: {
args: [],
...server,
},
timeout: server.connectionTimeout
timeout: server.connectionTimeout,
})),
false,
);

View File

@ -64,10 +64,10 @@ describe("MCPConnection", () => {
url: "http://test.com/events",
requestOptions: {
headers: {
"Authorization": "Bearer token123",
"X-Custom-Header": "custom-value"
}
}
Authorization: "Bearer token123",
"X-Custom-Header": "custom-value",
},
},
},
};
@ -172,7 +172,7 @@ describe("MCPConnection", () => {
expect(mockConnect).toHaveBeenCalled();
});
it('should handle custom connection timeout', async () => {
it("should handle custom connection timeout", async () => {
const conn = new MCPConnection({ ...options, timeout: 11 });
const mockConnect = jest
.spyOn(Client.prototype, "connect")
@ -199,7 +199,7 @@ describe("MCPConnection", () => {
await conn.connectClient(false, abortController.signal);
expect(conn.status).toBe("error");
expect(conn.errors[0]).toContain("Failed to connect to MCP server");
expect(conn.errors[0]).toContain("Failed to connect");
expect(mockConnect).toHaveBeenCalled();
});

View File

@ -16,6 +16,17 @@ import {
const DEFAULT_MCP_TIMEOUT = 20_000; // 20 seconds
// Commands that are batch scripts on Windows and need cmd.exe to execute
const WINDOWS_BATCH_COMMANDS = [
"npx",
"uv",
"uvx",
"pnpx",
"dlx",
"nx",
"bunx",
];
class MCPConnection {
public client: Client;
private transport: Transport;
@ -52,18 +63,53 @@ class MCPConnection {
await this.transport.close();
}
/**
 * Resolves the command and arguments for the current platform.
 * On Windows, batch script commands need to be executed via cmd.exe.
 * @param originalCommand The original command
 * @param originalArgs The original command arguments
 * @returns An object with the resolved command and arguments
 */
private resolveCommandForPlatform(
  originalCommand: string,
  originalArgs: string[],
): { command: string; args: string[] } {
  const needsCmdShell =
    process.platform === "win32" &&
    WINDOWS_BATCH_COMMANDS.includes(originalCommand);

  // Non-Windows platforms and plain executables run unchanged.
  if (!needsCmdShell) {
    return { command: originalCommand, args: originalArgs };
  }

  // Batch scripts (npx, uvx, ...) cannot be spawned directly on Windows;
  // route them through the shell: cmd.exe /c command [args]
  return {
    command: "cmd.exe",
    args: ["/c", originalCommand, ...originalArgs],
  };
}
private constructTransport(options: MCPOptions): Transport {
switch (options.transport.type) {
case "stdio":
const env: Record<string, string> = options.transport.env
? { ...options.transport.env }
: {};
if (process.env.PATH !== undefined) {
env.PATH = process.env.PATH;
}
// Resolve the command and args for the current platform
const { command, args } = this.resolveCommandForPlatform(
options.transport.command,
options.transport.args || [],
);
return new StdioClientTransport({
command: options.transport.command,
args: options.transport.args,
command,
args,
env,
});
case "websocket":
@ -76,11 +122,13 @@ class MCPConnection {
...init,
headers: {
...init?.headers,
...(options.transport.requestOptions?.headers as Record<string, string> | undefined),
}
...(options.transport.requestOptions?.headers as
| Record<string, string>
| undefined),
},
}),
},
requestInit: { headers: options.transport.requestOptions?.headers }
requestInit: { headers: options.transport.requestOptions?.headers },
});
default:
throw new Error(
@ -232,14 +280,14 @@ class MCPConnection {
]);
} catch (error) {
// Otherwise it's a connection error
let errorMessage = `Failed to connect to MCP server ${this.options.name}`;
let errorMessage = `Failed to connect to "${this.options.name}"\n`;
if (error instanceof Error) {
const msg = error.message.toLowerCase();
if (msg.includes("spawn") && msg.includes("enoent")) {
const command = msg.split(" ")[1];
errorMessage += `command "${command}" not found. To use this MCP server, install the ${command} CLI.`;
errorMessage += `Error: command "${command}" not found. To use this MCP server, install the ${command} CLI.`;
} else {
errorMessage += ": " + error.message;
errorMessage += "Error: " + error.message;
}
}

21
core/index.d.ts vendored
View File

@ -459,6 +459,7 @@ export interface ChatHistoryItem {
toolCallState?: ToolCallState;
isGatheringContext?: boolean;
reasoning?: Reasoning;
appliedRules?: RuleWithSource[];
}
export interface LLMFullCompletionOptions extends BaseCompletionOptions {
@ -1023,6 +1024,7 @@ export interface BaseCompletionOptions {
toolChoice?: ToolChoice;
reasoning?: boolean;
reasoningBudgetTokens?: number;
promptCaching?: boolean;
}
export interface ModelCapability {
@ -1090,6 +1092,7 @@ export interface TabAutocompleteOptions {
disable: boolean;
maxPromptTokens: number;
debounceDelay: number;
modelTimeout: number;
maxSuffixPercentage: number;
prefixPercentage: number;
transform?: boolean;
@ -1495,17 +1498,19 @@ export interface TerminalOptions {
waitForCompletion?: boolean;
}
export type RuleSource =
| "default-chat"
| "default-agent"
| "model-chat-options"
| "model-agent-options"
| "rules-block"
| "json-systemMessage"
| ".continuerules";
export interface RuleWithSource {
name?: string;
slug?: string;
source:
| "default-chat"
| "default-agent"
| "model-chat-options"
| "model-agent-options"
| "rules-block"
| "json-systemMessage"
| ".continuerules";
source: RuleSource;
globs?: string | string[];
rule: string;
description?: string;

View File

@ -79,7 +79,8 @@ const PROVIDER_SUPPORTS_IMAGES: string[] = [
"azure",
"scaleway",
"nebius",
"ovhcloud"
"ovhcloud",
"watsonx",
];
const MODEL_SUPPORTS_IMAGES: string[] = [
@ -99,6 +100,7 @@ const MODEL_SUPPORTS_IMAGES: string[] = [
"llama3.2",
"llama-3.2",
"llama4",
"granite-vision",
];
function modelSupportsTools(modelDescription: ModelDescription) {

View File

@ -230,6 +230,7 @@ describe("LLM", () => {
testFim: true,
skip: false,
testToolCall: true,
timeout: 60000,
},
);
testLLM(

View File

@ -5,6 +5,7 @@ import {
ConverseStreamCommandOutput,
InvokeModelCommand,
Message,
ToolConfiguration,
} from "@aws-sdk/client-bedrock-runtime";
import { fromNodeProviderChain } from "@aws-sdk/credential-providers";
@ -291,9 +292,10 @@ class Bedrock extends BaseLLM {
const convertedMessages = this._convertMessages(messages);
const shouldCacheSystemMessage =
!!systemMessage && this.cacheBehavior?.cacheSystemMessage;
!!systemMessage && this.cacheBehavior?.cacheSystemMessage || this.completionOptions.promptCaching;
const enablePromptCaching =
shouldCacheSystemMessage || this.cacheBehavior?.cacheConversation;
shouldCacheSystemMessage || this.cacheBehavior?.cacheConversation || this.completionOptions.promptCaching;
const shouldCacheToolsConfig = this.completionOptions.promptCaching;
// Add header for prompt caching
if (enablePromptCaching) {
@ -305,28 +307,34 @@ class Bedrock extends BaseLLM {
const supportsTools =
PROVIDER_TOOL_SUPPORT.bedrock?.(options.model || "") ?? false;
let toolConfig = supportsTools && options.tools
? {
tools: options.tools.map((tool) => ({
toolSpec: {
name: tool.function.name,
description: tool.function.description,
inputSchema: {
json: tool.function.parameters,
},
},
})),
} as ToolConfiguration
: undefined;
if (toolConfig?.tools && shouldCacheToolsConfig) {
toolConfig.tools.push({ cachePoint: { type: "default" } });
}
return {
modelId: options.model,
messages: convertedMessages,
system: systemMessage
? shouldCacheSystemMessage
? [{ text: systemMessage }, { cachePoint: { type: "default" } }]
: [{ text: systemMessage }]
: undefined,
toolConfig:
supportsTools && options.tools
? {
tools: options.tools.map((tool) => ({
toolSpec: {
name: tool.function.name,
description: tool.function.description,
inputSchema: {
json: tool.function.parameters,
},
},
})),
}
: undefined,
toolConfig: toolConfig,
messages: convertedMessages,
inferenceConfig: {
maxTokens: options.maxTokens,
temperature: options.temperature,

View File

@ -1,5 +1,6 @@
import { Mutex } from "async-mutex";
import { JSONSchema7, JSONSchema7Object } from "json-schema";
import { v4 as uuidv4 } from "uuid";
import {
ChatMessage,
@ -43,7 +44,7 @@ interface OllamaModelFileParams {
min_p?: number;
num_gpu?: number;
// deprecated or not directly supported here:
// Deprecated or not directly supported here:
num_thread?: number;
use_mmap?: boolean;
num_gqa?: number;
@ -90,10 +91,10 @@ type OllamaBaseResponse = {
model: string;
created_at: string;
} & (
| {
| {
done: false;
}
| {
| {
done: true;
done_reason: string;
total_duration: number; // Time spent generating the response in nanoseconds
@ -104,7 +105,7 @@ type OllamaBaseResponse = {
eval_duration: number; // Time spent generating the response in nanoseconds
context: number[]; // An encoding of the conversation used in this response; can be sent in the next request to keep conversational memory
}
);
);
type OllamaErrorResponse = {
error: string;
@ -113,14 +114,14 @@ type OllamaErrorResponse = {
type OllamaRawResponse =
| OllamaErrorResponse
| (OllamaBaseResponse & {
response: string; // the generated response
});
response: string; // the generated response
});
type OllamaChatResponse =
| OllamaErrorResponse
| (OllamaBaseResponse & {
message: OllamaChatMessage;
});
message: OllamaChatMessage;
});
interface OllamaTool {
type: "function";
@ -370,7 +371,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
if ("error" in j) {
throw new Error(j.error);
}
j.response ??= ''
j.response ??= "";
yield j.response;
} catch (e) {
throw new Error(`Error parsing Ollama response: ${e} ${chunk}`);
@ -439,6 +440,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
// But ollama returns the full object in one response with no streaming
chatMessage.toolCalls = res.message.tool_calls.map((tc) => ({
type: "function",
id: `tc_${uuidv4()}`, // Generate a proper UUID with a prefix
function: {
name: tc.function.name,
arguments: JSON.stringify(tc.function.arguments),

View File

@ -4,9 +4,12 @@ import {
CompletionOptions,
LLMOptions,
} from "../../index.js";
import {
fromChatCompletionChunk,
} from "../openaiTypeConverters.js";
import { renderChatMessage } from "../../util/messageContent.js";
import { BaseLLM } from "../index.js";
import { streamResponse } from "../stream.js";
import { streamResponse, streamSse } from "../stream.js";
let watsonxToken = {
expiration: 0,
@ -82,10 +85,8 @@ class WatsonX extends BaseLLM {
}
}
getWatsonxEndpoint(): string {
return this.deploymentId
? `${this.apiBase}/ml/v1/deployments/${this.deploymentId}/text/generation_stream?version=${this.apiVersion}`
: `${this.apiBase}/ml/v1/text/generation_stream?version=${this.apiVersion}`;
_getEndpoint(endpoint: string): string {
return `${this.apiBase}/ml/v1/${this.deploymentId ? `deployments/${this.deploymentId}/` : ""}text/${endpoint}_stream?version=${this.apiVersion}`
}
static providerName = "watsonx";
@ -117,14 +118,10 @@ class WatsonX extends BaseLLM {
};
}
protected _convertModelName(model: string): string {
return model;
}
protected _convertArgs(options: any, messages: ChatMessage[]) {
const finalOptions = {
messages: messages.map(this._convertMessage).filter(Boolean),
model: this._convertModelName(options.model),
model: options.model,
max_tokens: options.maxTokens,
temperature: options.temperature,
top_p: options.topP,
@ -185,26 +182,11 @@ class WatsonX extends BaseLLM {
signal: AbortSignal,
options: CompletionOptions,
): AsyncGenerator<string> {
for await (const chunk of this._streamChat(
[{ role: "user", content: prompt }],
signal,
options,
)) {
yield renderChatMessage(chunk);
}
}
protected async *_streamChat(
messages: ChatMessage[],
signal: AbortSignal,
options: CompletionOptions,
): AsyncGenerator<ChatMessage> {
await this.updateWatsonxToken();
const stopSequences =
options.stop?.slice(0, 6) ??
(options.model?.includes("granite") ? ["Question:"] : []);
const url = this.getWatsonxEndpoint();
const stopSequences = options.stop?.slice(0, 6) ?? [];
const url = this._getEndpoint("generation");
const headers = this._getHeaders();
const parameters: any = {
@ -224,7 +206,7 @@ class WatsonX extends BaseLLM {
}
const payload: any = {
input: messages[messages.length - 1].content,
input: prompt,
parameters: parameters,
};
if (!this.deploymentId) {
@ -268,10 +250,67 @@ class WatsonX extends BaseLLM {
}
}
});
yield {
role: "assistant",
content: generatedChunk,
};
yield generatedChunk
}
}
}
/**
 * Streams a chat completion from the watsonx chat endpoint.
 *
 * Builds the request payload from `options`, POSTs it, and yields one
 * ChatMessage per SSE chunk. Throws if no response body is received.
 */
protected async *_streamChat(
  messages: ChatMessage[],
  signal: AbortSignal,
  options: CompletionOptions,
): AsyncGenerator<ChatMessage> {
  await this.updateWatsonxToken();
  // watsonx accepts at most 6 stop sequences.
  const stopSequences = options.stop?.slice(0, 6) ?? [];
  const url = this._getEndpoint("chat");
  const headers = this._getHeaders();

  const payload: any = {
    messages: messages,
    max_tokens: options.maxTokens ?? 1024,
    stop: stopSequences,
    // Use ?? (not ||) so an explicit 0 is respected instead of being
    // silently replaced by the default.
    // NOTE(review): the default of 1 is kept from the original code —
    // confirm against the watsonx API docs; 0 is the more common neutral
    // default for penalty parameters.
    frequency_penalty: options.frequencyPenalty ?? 1,
    presence_penalty: options.presencePenalty ?? 1,
  };
  // Deployment endpoints carry their own model/project configuration.
  if (!this.deploymentId) {
    payload.model_id = options.model;
    payload.project_id = this.projectId;
  }
  // Compare against null/undefined (not truthiness) so 0 is a valid
  // setting for temperature and top_p.
  if (options.temperature != null) {
    payload.temperature = options.temperature;
  }
  if (options.topP != null) {
    payload.top_p = options.topP;
  }
  if (options.tools) {
    payload.tools = options.tools;
    if (options.toolChoice) {
      payload.tool_choice = options.toolChoice;
    } else {
      payload.tool_choice_option = "auto";
    }
  }

  const response = await this.fetch(url, {
    method: "POST",
    headers: headers,
    body: JSON.stringify(payload),
    signal,
  });

  if (!response.ok || response.body === null) {
    throw new Error(
      "Something went wrong. No response received, check your connection",
    );
  }
  for await (const value of streamSse(response)) {
    const chunk = fromChatCompletionChunk(value);
    if (chunk) {
      yield chunk;
    }
  }
}

View File

@ -23,6 +23,29 @@ const matchesGlobs = (
return false;
};
/**
 * Returns the subset of `rules` that applies to the given message.
 *
 * A rule with no globs is global and always applies; otherwise it applies
 * when at least one file path mentioned in the message's code blocks
 * matches the rule's globs.
 */
export const getApplicableRules = (
  userMessage: UserChatMessage | ToolResultChatMessage | undefined,
  rules: RuleWithSource[],
): RuleWithSource[] => {
  const mentionedPaths = userMessage
    ? extractPathsFromCodeBlocks(renderChatMessage(userMessage))
    : [];

  const ruleApplies = (rule: RuleWithSource): boolean => {
    if (!rule.globs) {
      // Glob-less rules apply to every file.
      return true;
    }
    return mentionedPaths.some((path) => matchesGlobs(path, rule.globs));
  };

  return rules.filter(ruleApplies);
};
export const getSystemMessageWithRules = ({
baseSystemMessage,
userMessage,
@ -32,23 +55,11 @@ export const getSystemMessageWithRules = ({
userMessage: UserChatMessage | ToolResultChatMessage | undefined;
rules: RuleWithSource[];
}) => {
const filePathsFromMessage = userMessage
? extractPathsFromCodeBlocks(renderChatMessage(userMessage))
: [];
const applicableRules = getApplicableRules(userMessage, rules);
let systemMessage = baseSystemMessage ?? "";
for (const rule of rules) {
// A rule is active if it has no globs (applies to all files)
// or if at least one file path matches its globs
const hasNoGlobs = !rule.globs;
const matchesAnyFilePath = filePathsFromMessage.some((path) =>
matchesGlobs(path, rule.globs),
);
if (hasNoGlobs || matchesAnyFilePath) {
systemMessage += `\n\n${rule.rule}`;
}
for (const rule of applicableRules) {
systemMessage += `\n\n${rule.rule}`;
}
return systemMessage;

View File

@ -35,7 +35,8 @@ export const PROVIDER_TOOL_SUPPORT: Record<
if (
model.toLowerCase().startsWith("gpt-4") ||
model.toLowerCase().startsWith("o3")
) return true;
)
return true;
return false;
},
openai: (model) => {
@ -66,7 +67,10 @@ export const PROVIDER_TOOL_SUPPORT: Record<
},
vertexai: (model) => {
// All gemini models except flash 2.0 lite support function calling
return model.toLowerCase().includes("gemini") && !model.toLowerCase().includes("lite");;
return (
model.toLowerCase().includes("gemini") &&
!model.toLowerCase().includes("lite")
);
},
bedrock: (model) => {
// For Bedrock, only support Claude Sonnet models with versions 3.5/3-5 and 3.7/3-7
@ -81,22 +85,24 @@ export const PROVIDER_TOOL_SUPPORT: Record<
},
mistral: (model) => {
// https://docs.mistral.ai/capabilities/function_calling/
return !model.toLowerCase().includes("mamba") &&
[
"codestral",
"mistral-large",
"mistral-small",
"pixtral",
"ministral",
"mistral-nemo"
].some((part) => model.toLowerCase().includes(part));
return (
!model.toLowerCase().includes("mamba") &&
[
"codestral",
"mistral-large",
"mistral-small",
"pixtral",
"ministral",
"mistral-nemo",
].some((part) => model.toLowerCase().includes(part))
);
},
// https://ollama.com/search?c=tools
ollama: (model) => {
let modelName = "";
// Extract the model name after the last slash to support other registries
if(model.includes("/")) {
let parts = model.split('/');
if (model.includes("/")) {
let parts = model.split("/");
modelName = parts[parts.length - 1];
} else {
modelName = model;
@ -117,6 +123,7 @@ export const PROVIDER_TOOL_SUPPORT: Record<
"llama3.2",
"llama3.1",
"qwen2",
"qwen3",
"mixtral",
"command-r",
"smollm2",
@ -145,8 +152,84 @@ export const PROVIDER_TOOL_SUPPORT: Record<
}
},
deepseek: (model) => {
if(model !== "deepseek-reasoner") {
if (model !== "deepseek-reasoner") {
return true;
}
}
},
watsonx: (model) => {
  const name = model.toLowerCase();
  // Models with "guard" in the name are explicitly excluded.
  if (name.includes("guard")) {
    return false;
  }
  const toolCapableFamilies = [
    "llama-3",
    "llama-4",
    "mistral",
    "codestral",
    "granite-3",
  ];
  for (const family of toolCapableFamilies) {
    if (name.includes(family)) {
      return true;
    }
  }
  // Falls through to undefined ("unknown") for unrecognized models,
  // matching the original behavior.
},
openrouter: (model) => {
  // https://openrouter.ai/models?fmt=cards&supported_parameters=tools
  const lower = model.toLowerCase();

  // Variants that never support tool calling, even when their base
  // family otherwise does.
  if (
    ["vision", "math", "guard", "mistrallite", "mistral-openorca"].some(
      (part) => lower.includes(part),
    )
  ) {
    return false;
  }

  const supportedPrefixes = [
    "openai/gpt-3.5",
    "openai/gpt-4",
    "openai/o1",
    "openai/o3",
    "openai/o4",
    "anthropic/claude-3",
    "microsoft/phi-3",
    "google/gemini-flash-1.5",
    "google/gemini-2",
    "google/gemini-pro",
    "x-ai/grok",
    "qwen/qwen3",
    "qwen/qwen-",
    "cohere/command-r",
    "ai21/jamba-1.6",
    "mistralai/mistral",
    "mistralai/ministral",
    "mistralai/codestral",
    "mistralai/mixtral",
    "mistral/ministral",
    "mistralai/pixtral",
    "meta-llama/llama-3.3",
    "amazon/nova",
    "deepseek/deepseek-r1",
    "deepseek/deepseek-chat",
    "meta-llama/llama-4",
    "all-hands/openhands-lm-32b",
  ];
  if (supportedPrefixes.some((prefix) => lower.startsWith(prefix))) {
    return true;
  }

  // Exact model IDs with tool support.
  // Fix: the previous loops shadowed `model` and compared each list entry
  // to itself, so every model reaching this point was reported as
  // supported; compare against the input model instead.
  const specificModels = [
    "qwen/qwq-32b",
    "qwen/qwen-2.5-72b-instruct",
    "meta-llama/llama-3.2-3b-instruct",
    "meta-llama/llama-3-8b-instruct",
    "meta-llama/llama-3-70b-instruct",
    "arcee-ai/caller-large",
    "nousresearch/hermes-3-llama-3.1-70b",
  ];
  if (specificModels.includes(lower)) {
    return true;
  }

  // Substrings that indicate tool support.
  const supportedContains = ["llama-3.1"];
  if (supportedContains.some((part) => lower.includes(part))) {
    return true;
  }
  // Implicitly returns undefined ("unknown") otherwise.
},
};

View File

@ -43,7 +43,7 @@ export function addToTestDir(pathsOrUris: (string | [string, string])[]) {
if (Array.isArray(p)) {
fs.writeFileSync(filepath, p[1]);
} else if (p.endsWith("/")) {
fs.mkdirSync(p, { recursive: true });
fs.mkdirSync(filepath, { recursive: true });
} else {
fs.writeFileSync(filepath, "");
}

View File

@ -6,6 +6,7 @@ export const DEFAULT_AUTOCOMPLETE_OPTS: TabAutocompleteOptions = {
prefixPercentage: 0.3,
maxSuffixPercentage: 0.2,
debounceDelay: 350,
modelTimeout: 150,
multilineCompletions: "auto",
// @deprecated TO BE REMOVED
slidingWindowPrefixPercentage: 0.75,

41
docs/docs/CLA.md Normal file
View File

@ -0,0 +1,41 @@
# Individual Contributor License Agreement (v1.0, Continue)
_Based on the Apache Software Foundation Individual CLA v2.2._
By commenting **“I have read the CLA Document and I hereby sign the CLA”**
on a Pull Request, **you (“Contributor”) agree to the following terms** for any
past and future “Contributions” submitted to **Continue (the “Project”)**.
---
## 1. Definitions
- **“Contribution”** — any original work of authorship submitted to the Project
  (code, documentation, designs, etc.).
- **“You” / “Your”** — the individual (or legal entity) posting the acceptance
  comment.
## 2. Copyright License

You grant **Continue Dev, Inc.** and all recipients of the Project a perpetual,
worldwide, non-exclusive, royalty-free, irrevocable copyright license to
reproduce, prepare derivative works of, publicly display, publicly perform,
sublicense, and distribute Your Contributions and such derivative works.

## 3. Patent License

You grant **Continue Dev, Inc.** and all recipients of the Project a perpetual,
worldwide, non-exclusive, royalty-free, irrevocable (except as below) patent
license to make, have made, use, sell, offer to sell, import, and otherwise
transfer Your Contributions alone or in combination with the Project.

If any entity brings patent litigation alleging that the Project or a
Contribution infringes a patent, the patent licenses granted by You to that
entity under this CLA terminate.
## 4. Representations
1. You are legally entitled to grant the licenses above.
2. Each Contribution is either Your original creation or You have authority to
submit it under this CLA.
3. Your Contributions are provided **“AS IS”** without warranties of any kind.
4. You will notify the Project if any statement above becomes inaccurate.
## 5. Miscellany
This Agreement is governed by the laws of the **State of California**, USA,
excluding its conflict-of-laws rules. If any provision is held unenforceable,
the remaining provisions remain in force.

View File

@ -1,13 +1,9 @@
---
title: MCP Blocks
---
title: MCP Blocks
sidebar_label: MCP
description: Model Context Protocol servers provide specialized functionality
keywords: [mcp, blocks, model context protocol, integrations, tools]
sidebar_position: 4
---
Model Context Protocol servers provide specialized functionality:

View File

@ -47,10 +47,6 @@ are pulled from [https://hub.continue.dev](https://hub.continue.dev).
Blocks can be imported into an assistant by adding a `uses` clause under the block type. This can be alongside other
`uses` clauses or explicit blocks of that type.
:::info
Note that local assistants cannot use blocks that require organization-level secrets.
:::
For example, the following assistant imports an Anthropic model and defines an Ollama DeepSeek one.
```yaml title="Assistant models section"
@ -85,6 +81,12 @@ Blocks:
You can find many examples of each of these block types on
the [Continue Explore Page](https://hub.continue.dev/explore/models)
:::info
Local blocks utilizing mustache notation for secrets (`${{ secrets.SECRET_NAME }}`) can read secret values:
- globally, from a `.env` located in the global `.continue` folder (`~/.continue/.env`)
- per-workspace, from a `.env` file located at the root of the current workspace.
:::
### Inputs
Blocks can be passed user inputs, including hub secrets and raw text values. To create a block that has an input, use
@ -111,7 +113,7 @@ models:
TEMP: 0.9
```
Note that hub secrets can be passed as inputs, using the a similar mustache format: `secrets.SECRET_NAME`.
Note that hub secrets can be passed as inputs, using a similar mustache format: `secrets.SECRET_NAME`.
### Overrides
@ -204,7 +206,6 @@ chat, editing, and summarizing.
- `topP`: The cumulative probability for nucleus sampling.
- `topK`: Maximum number of tokens considered at each step.
- `stop`: An array of stop tokens that will terminate the completion.
- `n`: Number of completions to generate.
- `reasoning`: Boolean to enable thinking/reasoning for Anthropic Claude 3.7+ models.
- `reasoningBudgetTokens`: Budget tokens for thinking/reasoning in Anthropic Claude 3.7+ models.
- `requestOptions`: HTTP request options specific to the model.

View File

@ -65,6 +65,10 @@ const config = {
],
],
scripts: [
'!function(){var e,t,n;e="7aa28ed11570734",t=function(){Reo.init({clientID:"7aa28ed11570734"})},(n=document.createElement("script")).src="https://static.reo.dev/"+e+"/reo.js",n.defer=!0,n.onload=t,document.head.appendChild(n)}();',
],
themeConfig:
/** @type {import("@docusaurus/preset-classic").ThemeConfig} */
({

View File

@ -6,7 +6,6 @@ fun environment(key: String) = providers.environmentVariable(key)
fun Sync.prepareSandbox() {
from("../../binary/bin") { into("${intellij.pluginName.get()}/core/") }
from("../vscode/node_modules/@vscode/ripgrep") { into("${intellij.pluginName.get()}/ripgrep/") }
}
val remoteRobotVersion = "0.11.23"

View File

@ -3,7 +3,7 @@ pluginGroup=com.github.continuedev.continueintellijextension
pluginName=continue-intellij-extension
pluginRepositoryUrl=https://github.com/continuedev/continue
# SemVer format -> https://semver.org
pluginVersion=1.0.16
pluginVersion=1.0.18
# Supported build number ranges and IntelliJ Platform versions -> https://plugins.jetbrains.com/docs/intellij/build-number-ranges.html
pluginSinceBuild=223
# IntelliJ Platform Properties -> https://plugins.jetbrains.com/docs/intellij/tools-gradle-intellij-plugin.html#configuration-intellij-extension

View File

@ -0,0 +1,11 @@
package com.github.continuedev.continueintellijextension.constants
/**
* Constants related to the Continue plugin.
*/
object ContinueConstants {
/**
* The unique identifier for the Continue plugin.
*/
const val PLUGIN_ID = "com.github.continuedev.continueintellijextension"
}

View File

@ -83,9 +83,9 @@ class CoreMessenger(
private fun setPermissions(destination: String) {
val osName = System.getProperty("os.name").toLowerCase()
if (osName.contains("mac") || osName.contains("darwin")) {
ProcessBuilder("xattr", "-dr", "com.apple.quarantine", destination).start()
ProcessBuilder("xattr", "-dr", "com.apple.quarantine", destination).start().waitFor()
setFilePermissions(destination, "rwxr-xr-x")
} else if (osName.contains("nix") || osName.contains("nux") || osName.contains("mac")) {
} else if (osName.contains("nix") || osName.contains("nux")) {
setFilePermissions(destination, "rwxr-xr-x")
}
}
@ -146,9 +146,11 @@ class CoreMessenger(
e.printStackTrace()
}
} else {
// Set proper permissions
coroutineScope.launch(Dispatchers.IO) { setPermissions(continueCorePath) }
// Set proper permissions synchronously
runBlocking(Dispatchers.IO) {
setPermissions(continueCorePath)
}
// Start the subprocess
val processBuilder =
ProcessBuilder(continueCorePath).directory(File(continueCorePath).parentFile)

View File

@ -2,12 +2,10 @@ package com.github.continuedev.continueintellijextension.`continue`
import com.github.continuedev.continueintellijextension.services.TelemetryService
import com.github.continuedev.continueintellijextension.utils.castNestedOrNull
import com.github.continuedev.continueintellijextension.utils.getContinueBinaryPath
import com.github.continuedev.continueintellijextension.utils.getMachineUniqueID
import com.intellij.ide.plugins.PluginManager
import com.intellij.openapi.components.service
import com.intellij.openapi.extensions.PluginId
import com.intellij.openapi.project.Project
import java.nio.file.Paths
import kotlinx.coroutines.*
class CoreMessengerManager(
@ -21,38 +19,8 @@ class CoreMessengerManager(
init {
coroutineScope.launch {
val myPluginId = "com.github.continuedev.continueintellijextension"
val pluginDescriptor =
PluginManager.getPlugin(PluginId.getId(myPluginId)) ?: throw Exception("Plugin not found")
val pluginPath = pluginDescriptor.pluginPath
val osName = System.getProperty("os.name").toLowerCase()
val os =
when {
osName.contains("mac") || osName.contains("darwin") -> "darwin"
osName.contains("win") -> "win32"
osName.contains("nix") || osName.contains("nux") || osName.contains("aix") -> "linux"
else -> "linux"
}
val osArch = System.getProperty("os.arch").toLowerCase()
val arch =
when {
osArch.contains("aarch64") || (osArch.contains("arm") && osArch.contains("64")) ->
"arm64"
osArch.contains("amd64") || osArch.contains("x86_64") -> "x64"
else -> "x64"
}
val target = "$os-$arch"
println("Identified OS: $os, Arch: $arch")
val corePath = Paths.get(pluginPath.toString(), "core").toString()
val targetPath = Paths.get(corePath, target).toString()
val continueCorePath =
Paths.get(targetPath, "continue-binary" + (if (os == "win32") ".exe" else "")).toString()
setupCoreMessenger(continueCorePath)
val continueBinaryPath = getContinueBinaryPath()
setupCoreMessenger(continueBinaryPath)
}
}

View File

@ -40,6 +40,14 @@ class IdeProtocolClient(
) : DumbAware {
private val ide: IDE = IntelliJIDE(project, continuePluginService)
/**
* Create a dispatcher with limited parallelism to prevent UI freezing.
* Note that there are 64 total threads available to the IDE.
*
* See this thread for details: https://github.com/continuedev/continue/issues/4098#issuecomment-2854865310
*/
private val limitedDispatcher = Dispatchers.IO.limitedParallelism(4)
init {
// Setup config.json / config.ts save listeners
VirtualFileManager.getInstance().addAsyncFileListener(
@ -52,7 +60,7 @@ class IdeProtocolClient(
}
fun handleMessage(msg: String, respond: (Any?) -> Unit) {
coroutineScope.launch(Dispatchers.IO) {
coroutineScope.launch(limitedDispatcher) {
val message = Gson().fromJson(msg, Message::class.java)
val messageType = message.messageType
val dataElement = message.data
@ -685,4 +693,4 @@ class IdeProtocolClient(
fun deleteAtIndex(index: Int) {
continuePluginService.sendToWebview("deleteAtIndex", DeleteAtIndex(index), uuid())
}
}
}

View File

@ -1,5 +1,6 @@
import com.github.continuedev.continueintellijextension.*
import com.github.continuedev.continueintellijextension.constants.getContinueGlobalPath
import com.github.continuedev.continueintellijextension.constants.ContinueConstants
import com.github.continuedev.continueintellijextension.`continue`.GitService
import com.github.continuedev.continueintellijextension.services.ContinueExtensionSettings
import com.github.continuedev.continueintellijextension.services.ContinuePluginService
@ -46,20 +47,12 @@ class IntelliJIDE(
private val gitService = GitService(project, continuePluginService)
private val ripgrep: String
private val ripgrep: String = getRipgrepPath()
init {
val myPluginId = "com.github.continuedev.continueintellijextension"
val pluginDescriptor =
PluginManager.getPlugin(PluginId.getId(myPluginId)) ?: throw Exception("Plugin not found")
val pluginPath = pluginDescriptor.pluginPath
val os = getOS()
ripgrep =
Paths.get(pluginPath.toString(), "ripgrep", "bin", "rg" + if (os == OS.WINDOWS) ".exe" else "").toString()
// Make ripgrep executable if on Unix-like systems
try {
val os = getOS()
if (os == OS.LINUX || os == OS.MAC) {
val file = File(ripgrep)
if (!file.canExecute()) {
@ -69,7 +62,6 @@ class IntelliJIDE(
} catch (e: Throwable) {
e.printStackTrace()
}
}
/**
@ -91,7 +83,7 @@ class IntelliJIDE(
remoteName = "ssh"
}
val pluginId = "com.github.continuedev.continueintellijextension"
val pluginId = ContinueConstants.PLUGIN_ID
val plugin = PluginManagerCore.getPlugin(PluginId.getId(pluginId))
val extensionVersion = plugin?.version ?: "Unknown"
@ -330,109 +322,62 @@ class IntelliJIDE(
override suspend fun getFileResults(pattern: String): List<String> {
val ideInfo = this.getIdeInfo()
if (ideInfo.remoteName == "local") {
val command = GeneralCommandLine(
ripgrep,
"--files",
"--iglob",
pattern,
"--ignore-file",
".continueignore",
"--ignore-file",
".gitignore",
)
command.setWorkDirectory(project.basePath)
val results = ExecUtil.execAndGetOutput(command).stdout
return results.split("\n")
try {
val command = GeneralCommandLine(
ripgrep,
"--files",
"--iglob",
pattern,
"--ignore-file",
".continueignore",
"--ignore-file",
".gitignore",
)
command.setWorkDirectory(project.basePath)
val results = ExecUtil.execAndGetOutput(command).stdout
return results.split("\n")
} catch (e: Exception) {
showToast(
ToastType.ERROR,
"Error executing ripgrep: ${e.message}"
)
return emptyList()
}
} else {
throw NotImplementedError("Ripgrep not supported, this workspace is remote")
// Leaving in here for ideas
// val projectBasePath = project.basePath ?: return emptyList()
// val scope = GlobalSearchScope.projectScope(project)
//
// // Get all ignore patterns from .continueignore files
// val ignorePatterns = mutableSetOf<String>()
// VirtualFileManager.getInstance().findFileByUrl("file://$projectBasePath")?.let { root ->
// VfsUtil.collectChildrenRecursively(root).forEach { file ->
// if (file.name == ".continueignore") {
// file.inputStream.bufferedReader().useLines { lines ->
// ignorePatterns.addAll(lines.filter { it.isNotBlank() && !it.startsWith("#") })
// }
// }
// }
// }
//
// return FilenameIndex.getAllFilesByExt(project, "*", scope)
// .filter { file ->
// val relativePath = file.path.removePrefix("$projectBasePath/")
// // Check if file matches pattern and isn't ignored
// PatternUtil.(relativePath, pattern) &&
// !ignorePatterns.any { PatternUtil.matchesGlob(relativePath, it) }
// }
// .map { it.path.removePrefix("$projectBasePath/") }
}
}
override suspend fun getSearchResults(query: String): String {
val ideInfo = this.getIdeInfo()
if (ideInfo.remoteName == "local") {
val command = GeneralCommandLine(
ripgrep,
"-i",
"--ignore-file",
".continueignore",
"--ignore-file",
".gitignore",
"-C",
"2",
"--heading",
"-e",
query,
"."
)
command.setWorkDirectory(project.basePath)
return ExecUtil.execAndGetOutput(command).stdout
try {
val command = GeneralCommandLine(
ripgrep,
"-i",
"--ignore-file",
".continueignore",
"--ignore-file",
".gitignore",
"-C",
"2",
"--heading",
"-e",
query,
"."
)
command.setWorkDirectory(project.basePath)
return ExecUtil.execAndGetOutput(command).stdout
} catch (e: Exception) {
showToast(
ToastType.ERROR,
"Error executing ripgrep: ${e.message}"
)
return "Error: Unable to execute ripgrep command."
}
} else {
throw NotImplementedError("Ripgrep not supported, this workspace is remote")
// For remote workspaces, use JetBrains search functionality
// val searchResults = StringBuilder()
// ApplicationManager.getApplication().invokeAndWait {
// val options = FindModel().apply {
// stringToFind = query
// isCaseSensitive = false
// isRegularExpressions = false
// isWholeWordsOnly = false
// searchContext = FindModel.SearchContext.ANY // or IN_CODE, IN_COMMENTS, IN_STRING_LITERALS, etc.
// isMultiline = true // Allow matching across multiple lines
// }
//
// val progressIndicator = EmptyProgressIndicator()
// val presentation = FindUsagesProcessPresentation(
// UsageViewPresentation()
// )
// val filesToSearch = ProjectFileIndex.getInstance(project)
// .iterateContent(::ArrayList)
// .filterNot { it.isDirectory }
// .toSet()
//
//
// FindInProjectUtil.findUsages(
// options,
// project,
// progressIndicator,
// presentation,
// filesToSearch
// ) { result ->
// val virtualFile = result.virtualFile
// searchResults.append(virtualFile.path).append("\n")
// searchResults.append("${result..trim()}\n")
// true // continue searching
// }
// }
// return searchResults.toString()
}
}

View File

@ -0,0 +1,58 @@
package com.github.continuedev.continueintellijextension.utils
import com.intellij.ide.plugins.PluginManager
import com.intellij.openapi.extensions.PluginId
import com.github.continuedev.continueintellijextension.constants.ContinueConstants
import java.nio.file.Path
import java.nio.file.Paths
/**
* Gets the path to the Continue plugin directory
*
* @return Path to the plugin directory
* @throws Exception if the plugin is not found
*/
fun getContinuePluginPath(): Path {
val pluginDescriptor =
PluginManager.getPlugin(PluginId.getId(ContinueConstants.PLUGIN_ID)) ?: throw Exception("Plugin not found")
return pluginDescriptor.pluginPath
}
/**
* Gets the path to the Continue core directory with target platform
*
* @return Path to the Continue core directory with target platform
* @throws Exception if the plugin is not found
*/
fun getContinueCorePath(): String {
val pluginPath = getContinuePluginPath()
val corePath = Paths.get(pluginPath.toString(), "core").toString()
val target = getOsAndArchTarget()
return Paths.get(corePath, target).toString()
}
/**
 * Gets the path to the Continue binary executable.
 *
 * @return Path to the Continue binary executable
 * @throws Exception if the plugin is not found
 */
fun getContinueBinaryPath(): String = binaryInCoreDir("continue-binary")

/**
 * Gets the path to the Ripgrep executable bundled with the plugin.
 *
 * @return Path to the Ripgrep executable
 * @throws Exception if the plugin is not found
 */
fun getRipgrepPath(): String = binaryInCoreDir("rg")

/**
 * Resolves [baseName] (plus ".exe" on Windows) inside the per-target
 * Continue core directory. Shared by the binary/ripgrep path helpers to
 * avoid duplicating the OS-suffix logic.
 */
private fun binaryInCoreDir(baseName: String): String {
    val exeSuffix = if (getOS() == OS.WINDOWS) ".exe" else ""
    return Paths.get(getContinueCorePath(), "$baseName$exeSuffix").toString()
}

View File

@ -4,7 +4,6 @@ import com.intellij.openapi.vfs.VirtualFile
import java.net.NetworkInterface
import java.util.*
import java.awt.event.KeyEvent.*
enum class OS {
MAC, WINDOWS, LINUX
}
@ -92,4 +91,30 @@ fun Any?.getNestedOrNull(vararg keys: String): Any? {
result = (result as? Map<*, *>)?.get(key) ?: return null
}
return result
}
/**
 * Get the target string for the Continue binary, in the format "$os-$arch":
 * - os is one of: darwin, win32, or linux
 * - arch is one of: arm64 or x64
 *
 * @return Target string in format "$os-$arch"
 */
fun getOsAndArchTarget(): String {
    val osPart = when (getOS()) {
        OS.MAC -> "darwin"
        OS.WINDOWS -> "win32"
        OS.LINUX -> "linux"
    }
    val rawArch = System.getProperty("os.arch").lowercase()
    // Anything that is not recognizably 64-bit ARM is treated as x64,
    // matching the original fallback behavior.
    val is64BitArm =
        rawArch.contains("aarch64") || (rawArch.contains("arm") && rawArch.contains("64"))
    val archPart = if (is64BitArm) "arm64" else "x64"
    return "$osPart-$archPart"
}

View File

@ -4,22 +4,23 @@
// / /___ / /_/ /_ / / // /_ _ / _ / / // /_/ / / __/
// \____/ \____/ /_/ /_/ \__/ /_/ /_/ /_/ \__,_/ \___/
//
// Chat, Edit, and Autocomplete tutorial
// Autocomplete, Edit, Chat, and Agent tutorial
//
// Setup -
// Autocomplete //
// Autocomplete provides inline code suggestions as you type.
// First, open the Continue sidebar by pressing [Cmd + L] or clicking the Continue icon.
// 1. Place cursor after `sortingAlgorithm:` below and press [Enter]
// 2. Press [Tab] to accept the Autocomplete suggestion
// See an example at https://docs.continue.dev/getting-started/install
// Basic assertion for sortingAlgorithm:
// Follow the instructions in the sidebar to set up a Chat/Edit model and an Autocomplete model.
// Edit //
// Edit is a convenient way to make quick changes to specific code and files.
// Chat
// Highlight the code below
// Press [Cmd + L] to add to Chat
// Try asking Continue "what sorting algorithm is this?"
// 1. Highlight the code below
// 2. Press [Cmd/Ctrl + I] to Edit
// 3. Try asking Continue to "make this more readable"
public static int[] sortingAlgorithm(int[] x) {
for (int i = 0; i < x.length; i++) {
for (int j = 0; j < x.length - 1; j++) {
@ -33,14 +34,12 @@ public static int[] sortingAlgorithm(int[] x) {
return x;
}
// [Cmd + L] always starts a new chat. Now, try the same thing using [Cmd + Shift + L].
// This will add the code into the current chat
// Chat //
// Chat makes it easy to ask for help from an LLM without needing to leave the IDE.
// Edit
// Highlight the code below
// Press [Cmd + I] to Edit
// Try asking Continue to "make this more readable"
// 1. Highlight the code below
// 2. Press [Cmd/Ctrl + J] to add to Chat
// 3. Try asking Continue "what sorting algorithm is this?"
public static int[] sortingAlgorithm2(int[] x) {
for (int i = 0; i < x.length; i++) {
for (int j = 0; j < x.length - 1; j++) {
@ -54,15 +53,11 @@ public static int[] sortingAlgorithm2(int[] x) {
return x;
}
// Autocomplete
// Agent //
// Agent equips the Chat model with the tools needed to handle a wide range of coding tasks, allowing
// the model to make decisions and save you the work of manually finding context and performing actions.
// Place cursor after `sortingAlgorithm:` below and press [Enter]
// Press [Tab] to accept the Autocomplete suggestion
// 1. Switch from "Chat" to "Agent" mode using the dropdown in the bottom left of the input box
// 2. Try asking Continue "Write unit tests for this code in a new file and run the test"
// Basic assertion for sortingAlgorithm:
// - Learn More -
// Visit the Continue Docs at https://docs.continue.dev/getting-started/overview
// Learn more at https://docs.continue.dev/getting-started/overview //

View File

@ -4,54 +4,49 @@
/ /___ / /_/ /_ / / // /_ _ / _ / / // /_/ / / __/
\____/ \____/ /_/ /_/ \__/ /_/ /_/ /_/ \__,_/ \___/
Chat, Edit, and Autocomplete tutorial
Autocomplete, Edit, Chat, and Agent tutorial
"""
# ———————————————————————————————————————————————— Setup ————————————————————————————————————————————————-
# First, open the Continue sidebar by pressing [Cmd + L] or clicking the Continue icon.
# ————————————————————————————————————————————— Autocomplete —————————————————————————————————————————————— #
# Autocomplete provides inline code suggestions as you type.
# See an example at https://docs.continue.dev/getting-started/install
# Follow the instructions in the sidebar to set up a Chat/Edit model and an Autocomplete model.
# ————————————————————————————————————————————————— Chat —————————————————————————————————————————————————
## Highlight the code below
## Press [Cmd + L] to add to Chat
## Try asking Continue "what sorting algorithm is this?"
def sorting_algorithm(x):
for i in range(len(x)):
for j in range(len(x) - 1):
if x[j] > x[j + 1]:
x[j], x[j + 1] = x[j + 1], x[j]
return x
# [Cmd + L] always starts a new chat. Now, try the same thing using [Cmd + Shift + L].
# This will add the code into the current chat
# ————————————————————————————————————————————————— Edit —————————————————————————————————————————————————
## Highlight the code below
## Press [Cmd + I] to Edit
## Try asking Continue to "make this more readable"
def sorting_algorithm(x):
for i in range(len(x)):
for j in range(len(x) - 1):
if x[j] > x[j + 1]:
x[j], x[j + 1] = x[j + 1], x[j]
return x
# ————————————————————————————————————————————— Autocomplete ——————————————————————————————————————————————
## Place cursor after `sorting_algorithm:` below and press [Enter]
## Press [Tab] to accept the Autocomplete suggestion
# 1. Place cursor after `sorting_algorithm:` below and press [Enter]
# 2. Press [Tab] to accept the Autocomplete suggestion
# Basic assertion for sorting_algorithm:
# ————————————————————————————————————————————————— Edit ————————————————————————————————————————————————— #
# Edit is a convenient way to make quick changes to specific code and files.
# 1. Highlight the code below
# 2. Press [Cmd/Ctrl + I] to Edit
# 3. Try asking Continue to "make this more readable"
def sorting_algorithm(x):
for i in range(len(x)):
for j in range(len(x) - 1):
if x[j] > x[j + 1]:
x[j], x[j + 1] = x[j + 1], x[j]
return x
# ————————————————————————————————————————————————— Chat ————————————————————————————————————————————————— #
# Chat makes it easy to ask for help from an LLM without needing to leave the IDE.
# —————————————————————————————————————————————- Learn More -——————————————————————————————————————————————
# 1. Highlight the code below
# 2. Press [Cmd/Ctrl + J] to add to Chat
# 3. Try asking Continue "what sorting algorithm is this?"
def sorting_algorithm2(x):
for i in range(len(x)):
for j in range(len(x) - 1):
if x[j] > x[j + 1]:
x[j], x[j + 1] = x[j + 1], x[j]
return x
# Visit the Continue Docs at https://docs.continue.dev/getting-started/overview
# ————————————————————————————————————————————————— Agent ————————————————————————————————————————————————— #
# Agent equips the Chat model with the tools needed to handle a wide range of coding tasks, allowing
# the model to make decisions and save you the work of manually finding context and performing actions.
# 1. Switch from "Chat" to "Agent" mode using the dropdown in the bottom left of the input box
# 2. Try asking Continue "Write unit tests for this code in a new file and run the test"
# —————————————————— Learn more at https://docs.continue.dev/getting-started/overview ——————————————————— #

View File

@ -4,22 +4,23 @@
// / /___ / /_/ /_ / / // /_ _ / _ / / // /_/ / / __/
// \____/ \____/ /_/ /_/ \__/ /_/ /_/ /_/ \__,_/ \___/
//
// Chat, Edit, and Autocomplete tutorial
// Autocomplete, Edit, Chat, and Agent tutorial
//
// ———————————————————————————————————————————————— Setup ————————————————————————————————————————————————-
// ————————————————————————————————————————————— Autocomplete —————————————————————————————————————————————— //
// Autocomplete provides inline code suggestions as you type.
// First, open the Continue sidebar by pressing [Cmd + L] or clicking the Continue icon.
// 1. Place cursor after `sortingAlgorithm:` below and press [Enter]
// 2. Press [Tab] to accept the Autocomplete suggestion
// See an example at https://docs.continue.dev/getting-started/install
// Basic assertion for sortingAlgorithm:
// Follow the instructions in the sidebar to set up a Chat/Edit model and an Autocomplete model.
// ————————————————————————————————————————————————— Edit ————————————————————————————————————————————————— //
// Edit is a convenient way to make quick changes to specific code and files.
// ————————————————————————————————————————————————— Chat —————————————————————————————————————————————————
// Highlight the code below
// Press [Cmd + L] to add to Chat
// Try asking Continue "what sorting algorithm is this?"
// 1. Highlight the code below
// 2. Press [Cmd/Ctrl + I] to Edit
// 3. Try asking Continue to "make this more readable"
function sortingAlgorithm(x: number[]): number[] {
for (let i = 0; i < x.length; i++) {
for (let j = 0; j < x.length - 1; j++) {
@ -33,14 +34,12 @@ function sortingAlgorithm(x: number[]): number[] {
return x;
}
// [Cmd + L] always starts a new chat. Now, try the same thing using [Cmd + Shift + L].
// This will add the code into the current chat
// ————————————————————————————————————————————————— Chat ————————————————————————————————————————————————— //
// Chat makes it easy to ask for help from an LLM without needing to leave the IDE.
// ————————————————————————————————————————————————— Edit —————————————————————————————————————————————————
// Highlight the code below
// Press [Cmd + I] to Edit
// Try asking Continue to "make this more readable"
// 1. Highlight the code below
// 2. Press [Cmd/Ctrl + J] to add to Chat
// 3. Try asking Continue "what sorting algorithm is this?"
function sortingAlgorithm2(x: number[]): number[] {
for (let i = 0; i < x.length; i++) {
for (let j = 0; j < x.length - 1; j++) {
@ -54,13 +53,11 @@ function sortingAlgorithm2(x: number[]): number[] {
return x;
}
// ————————————————————————————————————————————— Autocomplete ——————————————————————————————————————————————
// ————————————————————————————————————————————————— Agent ————————————————————————————————————————————————— //
// Agent equips the Chat model with the tools needed to handle a wide range of coding tasks, allowing
// the model to make decisions and save you the work of manually finding context and performing actions.
// Place cursor after `sortingAlgorithm:` below and press [Enter]
// Press [Tab] to accept the Autocomplete suggestion
// 1. Switch from "Chat" to "Agent" mode using the dropdown in the bottom left of the input box
// 2. Try asking Continue "Write unit tests for this code in a new file"
// Basic assertion for sortingAlgorithm:
// —————————————————————————————————————————————- Learn More -——————————————————————————————————————————————
// Visit the Continue Docs at https://docs.continue.dev/getting-started/overview
// —————————————————— Learn more at https://docs.continue.dev/getting-started/overview ——————————————————— //

View File

@ -12,7 +12,3 @@ See [Environment Setup](../../CONTRIBUTING.md#environment-setup)
# How to run and debug tests
After following the setup in [Environment Setup](../../CONTRIBUTING.md#environment-setup) you can run the `Extension (VSCode)` launch configuration in VS Code.
## Notes
- We require vscode engine `^1.67.0` and use `@types/vscode` version `1.67.0` because this is the earliest version that doesn't break any of the APIs we are using. If you go back to `1.66.0`, then it will break `vscode.window.tabGroups`.

View File

@ -4,28 +4,24 @@
/ /___ / /_/ /_ / / // /_ _ / _ / / // /_/ / / __/
\____/ \____/ /_/ /_/ \__/ /_/ /_/ /_/ \__,_/ \___/
Chat, Edit, and Autocomplete tutorial
Autocomplete, Edit, Chat, and Agent tutorial
"""
# ————————————————————————————————————————————————— Chat ————————————————————————————————————————————————— #
# ————————————————————————————————————————————— Autocomplete —————————————————————————————————————————————— #
# Autocomplete provides inline code suggestions as you type.
## Highlight the code below
## Press [Cmd/Ctrl + L] to add to Chat
## Try asking Continue "what sorting algorithm is this?"
def sorting_algorithm(x):
for i in range(len(x)):
for j in range(len(x) - 1):
if x[j] > x[j + 1]:
x[j], x[j + 1] = x[j + 1], x[j]
return x
# 1. Place cursor after `sorting_algorithm:` below and press [Enter]
# 2. Press [Tab] to accept the Autocomplete suggestion
# Basic assertion for sorting_algorithm:
# ————————————————————————————————————————————————— Edit ————————————————————————————————————————————————— #
# Edit is a convenient way to make quick changes to specific code and files.
## Highlight the code below
## Press [Cmd/Ctrl + I] to Edit
## Try asking Continue to "make this more readable"
# 1. Highlight the code below
# 2. Press [Cmd/Ctrl + I] to Edit
# 3. Try asking Continue to "make this more readable"
def sorting_algorithm(x):
for i in range(len(x)):
for j in range(len(x) - 1):
@ -33,12 +29,25 @@ def sorting_algorithm(x):
x[j], x[j + 1] = x[j + 1], x[j]
return x
# ————————————————————————————————————————————— Autocomplete —————————————————————————————————————————————— #
# ————————————————————————————————————————————————— Chat ————————————————————————————————————————————————— #
# Chat makes it easy to ask for help from an LLM without needing to leave the IDE.
## Place cursor after `sorting_algorithm...` below and press [Enter]
## Press [Tab] to accept the Autocomplete suggestion
# 1. Highlight the code below
# 2. Press [Cmd/Ctrl + L] to add to Chat
# 3. Try asking Continue "what sorting algorithm is this?"
def sorting_algorithm2(x):
for i in range(len(x)):
for j in range(len(x) - 1):
if x[j] > x[j + 1]:
x[j], x[j + 1] = x[j + 1], x[j]
return x
# Basic assertion for sorting_algorithm...
# ————————————————————————————————————————————————— Agent ————————————————————————————————————————————————— #
# Agent equips the Chat model with the tools needed to handle a wide range of coding tasks, allowing
# the model to make decisions and save you the work of manually finding context and performing actions.
# 1. Switch from "Chat" to "Agent" mode using the dropdown in the bottom left of the input box
# 2. Try asking Continue "Write unit tests for this code in a new file",
# or if you have Python installed, "Write unit tests for this code in a new file and run the test"
# —————————————————— Learn more at https://docs.continue.dev/getting-started/overview ——————————————————— #
# —————————————————— Learn more at https://docs.continue.dev/getting-started/overview ——————————————————— #

View File

@ -1,4 +1,6 @@
### Setup
# E2E Tests
## Setup
When running e2e tests for the first time
@ -6,7 +8,7 @@ When running e2e tests for the first time
npm run e2e:all
```
### Run
## Run
Depending on what code you update, you can use a faster loop to test your changes:
@ -14,7 +16,7 @@ Depending on what code you update, you can use a faster loop to test your change
- If you update the extension code, you can run `npm run e2e:recompile`
- If you update the gui code, you can run `npm run e2e:rebuild-gui`
### Writing tests
## Writing tests
All e2e tests are separated (by folder) into

View File

@ -46,7 +46,7 @@ export class GUISelectors {
}
public static getToolCallStatusMessage(view: WebView) {
return SelectorUtils.getElementByDataTestId(view, "toggle-div-title");
return SelectorUtils.getElementByDataTestId(view, "tool-call-title");
}
public static getToolButton(view: WebView) {
@ -89,6 +89,14 @@ export class GUISelectors {
);
}
public static getRulesPeek(view: WebView) {
return SelectorUtils.getElementByDataTestId(view, "rules-peek");
}
public static getFirstRulesPeekItem(view: WebView) {
return SelectorUtils.getElementByDataTestId(view, "rules-peek-item");
}
public static getNthHistoryTableRow(view: WebView, index: number) {
return SelectorUtils.getElementByDataTestId(view, `history-row-${index}`);
}

View File

@ -245,6 +245,47 @@ describe("GUI Test", () => {
await GUIActions.selectModeFromDropdown(view, "Agent");
});
it("should display rules peek and show rule details", async () => {
// Send a message to trigger the model response
const [messageInput] = await GUISelectors.getMessageInputFields(view);
await messageInput.sendKeys("Hello");
await messageInput.sendKeys(Key.ENTER);
// Wait for the response to appear
await TestUtils.waitForSuccess(() =>
GUISelectors.getThreadMessageByText(view, "I'm going to call a tool:"),
);
// Verify that "1 rule" text appears
const rulesPeek = await TestUtils.waitForSuccess(() =>
GUISelectors.getRulesPeek(view),
);
const rulesPeekText = await rulesPeek.getText();
expect(rulesPeekText).to.include("1 rule");
// Click on the rules peek to expand it
await rulesPeek.click();
// Wait for the rule details to appear
const ruleItem = await TestUtils.waitForSuccess(() =>
GUISelectors.getFirstRulesPeekItem(view),
);
await TestUtils.waitForSuccess(async () => {
const text = await ruleItem.getText();
if (!text || text.trim() === "") {
throw new Error("Rule item text is empty");
}
return ruleItem;
});
// Verify the rule content
const ruleItemText = await ruleItem.getText();
expect(ruleItemText).to.include("Assistant rule");
expect(ruleItemText).to.include("Always applied");
expect(ruleItemText).to.include("TEST_SYS_MSG");
}).timeout(DEFAULT_TIMEOUT.MD);
it("should render tool call", async () => {
const [messageInput] = await GUISelectors.getMessageInputFields(view);
await messageInput.sendKeys("Hello");
@ -258,7 +299,9 @@ describe("GUI Test", () => {
expect(await statusMessage.getText()).contain(
"Continue viewed the git diff",
);
}).timeout(DEFAULT_TIMEOUT.MD);
// wait for 30 seconds, promise
await new Promise((resolve) => setTimeout(resolve, 30000));
}).timeout(DEFAULT_TIMEOUT.MD * 100);
it("should call tool after approval", async () => {
await GUIActions.toggleToolPolicy(view, "builtin_view_diff", 2);

File diff suppressed because it is too large Load Diff

View File

@ -2,7 +2,7 @@
"name": "continue",
"icon": "media/icon.png",
"author": "Continue Dev, Inc",
"version": "1.1.33",
"version": "1.1.35",
"repository": {
"type": "git",
"url": "https://github.com/continuedev/continue"
@ -612,14 +612,14 @@
"e2e:compile": "tsc -p ./tsconfig.e2e.json",
"e2e:build": "npm --prefix ../../gui run build && npm run package",
"e2e:create-storage": "mkdir -p ./e2e/storage",
"e2e:get-chromedriver": "CODE_VERSION=\"1.95.0\" extest get-chromedriver --storage ./e2e/storage",
"e2e:get-vscode": "CODE_VERSION=\"1.95.0\" extest get-vscode --storage ./e2e/storage",
"e2e:get-chromedriver": "extest get-chromedriver --storage ./e2e/storage --code_version 1.95.0",
"e2e:get-vscode": "extest get-vscode --storage ./e2e/storage --code_version 1.95.0",
"e2e:sign-vscode": "codesign --entitlements entitlements.plist --deep --force -s - './e2e/storage/Visual Studio Code.app'",
"e2e:copy-vsix": "chmod +x ./e2e/get-latest-vsix.sh && bash ./e2e/get-latest-vsix.sh",
"e2e:install-vsix": "extest install-vsix -f ./e2e/vsix/continue.vsix --extensions_dir ./e2e/.test-extensions --storage ./e2e/storage",
"e2e:install-extensions": "extest install-from-marketplace ms-vscode-remote.remote-ssh --extensions_dir ./e2e/.test-extensions --storage ./e2e/storage && extest install-from-marketplace ms-vscode-remote.remote-containers --extensions_dir ./e2e/.test-extensions --storage ./e2e/storage && extest install-from-marketplace ms-vscode-remote.remote-wsl --extensions_dir ./e2e/.test-extensions --storage ./e2e/storage",
"e2e:test": "extest run-tests ${TEST_FILE:-'./e2e/_output/tests/*.test.js'} --code_settings settings.json --extensions_dir ./e2e/.test-extensions --storage ./e2e/storage",
"e2e:clean": "rm -rf ./e2e/_output",
"e2e:clean": "rm -rf ./e2e/_output ./e2e/storage",
"e2e:all": "npm run e2e:build && npm run e2e:compile && npm run e2e:create-storage && npm run e2e:get-chromedriver && npm run e2e:get-vscode && npm run e2e:sign-vscode && npm run e2e:copy-vsix && npm run e2e:install-vsix && npm run e2e:install-extensions && CONTINUE_GLOBAL_DIR=e2e/test-continue npm run e2e:test && npm run e2e:clean",
"e2e:recompile-extension": "npm run package && npm run e2e:compile && npm run e2e:copy-vsix && npm run e2e:install-vsix && npm run e2e:install-extensions && CONTINUE_GLOBAL_DIR=e2e/test-continue npm run e2e:test && npm run e2e:clean",
"e2e:rebuild-gui": "rm -rf gui && cp -r ../../gui/dist gui && npm run package && npm run e2e:copy-vsix && npm run e2e:install-vsix && npm run e2e:install-extensions && CONTINUE_GLOBAL_DIR=e2e/test-continue npm run e2e:test && npm run e2e:clean",
@ -657,8 +657,7 @@
"rimraf": "^5.0.5",
"typescript": "^5.6.3",
"vite": "^4.5.14",
"vsce": "^2.15.0",
"vscode-extension-tester": "^8.10.0"
"vscode-extension-tester": "^8.14.1"
},
"dependencies": {
"@continuedev/config-types": "^1.0.14",
@ -705,7 +704,7 @@
"svg-builder": "^2.0.0",
"systeminformation": "^5.23.7",
"tailwindcss": "^3.3.2",
"undici": "^6.2.0",
"undici": "^6.21.3",
"uuid": "^9.0.1",
"uuidv4": "^6.2.13",
"vectordb": "^0.4.20",

View File

@ -19,8 +19,8 @@ if (!fs.existsSync("build")) {
const isPreRelease = args.includes("--pre-release");
let command = isPreRelease
? "npx vsce package --out ./build --pre-release --no-dependencies" // --yarn"
: "npx vsce package --out ./build --no-dependencies"; // --yarn";
? "npx @vscode/vsce package --out ./build --pre-release --no-dependencies" // --yarn"
: "npx @vscode/vsce package --out ./build --no-dependencies"; // --yarn";
if (target) {
command += ` --target ${target}`;

View File

@ -22,6 +22,8 @@ import readLastLines from "read-last-lines";
import * as vscode from "vscode";
import * as YAML from "yaml";
import { convertJsonToYamlConfig } from "../../../packages/config-yaml/dist";
import {
getAutocompleteStatusBarDescription,
getAutocompleteStatusBarTitle,
@ -36,17 +38,15 @@ import { ContinueGUIWebviewViewProvider } from "./ContinueGUIWebviewViewProvider
import { VerticalDiffManager } from "./diff/vertical/manager";
import EditDecorationManager from "./quickEdit/EditDecorationManager";
import { QuickEdit, QuickEditShowParams } from "./quickEdit/QuickEditQuickPick";
import { Battery } from "./util/battery";
import { getMetaKeyLabel } from "./util/util";
import { VsCodeIde } from "./VsCodeIde";
import { convertJsonToYamlConfig } from "../../../packages/config-yaml/dist";
import {
addCodeToContextFromRange,
addEntireFileToContext,
addHighlightedCodeToContext,
} from "./util/addCode";
import { Battery } from "./util/battery";
import { getMetaKeyLabel } from "./util/util";
import { openEditorAndRevealRange } from "./util/vscode";
import { VsCodeIde } from "./VsCodeIde";
let fullScreenPanel: vscode.WebviewPanel | undefined;
@ -673,6 +673,8 @@ const getCommandsMap: (
},
{
label: quickPickStatusText(targetStatus),
description:
getMetaKeyLabel() + " + K, " + getMetaKeyLabel() + " + A",
},
{
label: "$(feedback) Give feedback",

View File

@ -127,6 +127,11 @@ export class VerticalDiffHandler implements vscode.Disposable {
private incrementCurrentLineIndex() {
this.currentLineIndex++;
this.updateIndexLineDecorations();
const range = new vscode.Range(this.currentLineIndex, 0, this.currentLineIndex, 0);
this.editor.revealRange(
range,
vscode.TextEditorRevealType.Default,
);
}
private async insertTextAboveLine(index: number, text: string) {
@ -338,6 +343,12 @@ export class VerticalDiffHandler implements vscode.Disposable {
await this.reapplyWithMyersDiff(diffLines);
const range = new vscode.Range(this.startLine, 0, this.startLine, 0);
this.editor.revealRange(
range,
vscode.TextEditorRevealType.Default,
);
this.options.onStatusUpdate(
"done",
this.editorToVerticalDiffCodeLens.get(this.fileUri)?.length ?? 0,

View File

@ -3,7 +3,7 @@
"parser": "@typescript-eslint/parser",
"plugins": ["@typescript-eslint", "eslint-plugin-react"],
"parserOptions": {
"project": "gui/tsconfig.json"
"project": ["tsconfig.json", "tsconfig.node.json"]
},
"extends": [
// "eslint:recommended",

View File

@ -12,7 +12,8 @@
"test": "vitest run",
"test:coverage": "vitest run --coverage",
"test:ui": "vitest --ui",
"test:watch": "vitest"
"test:watch": "vitest",
"lint": "eslint --ext ts"
},
"dependencies": {
"@continuedev/config-yaml": "file:../packages/config-yaml",

View File

@ -40,7 +40,10 @@ export default function AcceptRejectAllButtons({
}
return (
<div className="flex flex-row items-center justify-evenly gap-2 p-1 px-3">
<div
className="flex flex-row items-center justify-evenly gap-2 p-1 px-3"
data-testid="accept-reject-all-buttons"
>
<button
className="flex cursor-pointer flex-row flex-wrap justify-center gap-1 border-none bg-transparent px-2 py-1 text-xs text-gray-300 opacity-80 hover:opacity-100 hover:brightness-125"
onClick={() => handleAcceptOrReject("rejectDiff")}

View File

@ -58,14 +58,6 @@ export function ApplyActions(props: ApplyActionsProps) {
);
switch (props.applyState ? props.applyState.status : null) {
case "not-started":
return (
<div className="flex select-none items-center rounded bg-zinc-700 pl-2 pr-1">
<span className="text-lightgray inline-flex w-min items-center gap-2 text-center text-xs">
Pending
</span>
</div>
);
case "streaming":
return (
<div className="flex select-none items-center rounded bg-zinc-700 pl-2 pr-1">

View File

@ -1,26 +0,0 @@
import Spinner from "../../gui/Spinner";
export interface GeneratingCodeLoaderProps {
showLineCount: boolean;
codeBlockContent: string;
isPending: boolean;
}
export function GeneratingCodeLoader({
showLineCount,
codeBlockContent,
isPending,
}: GeneratingCodeLoaderProps) {
const numLinesCodeBlock = codeBlockContent.split("\n").length;
const linesGeneratedText =
numLinesCodeBlock === 1
? `1 line generated`
: `${numLinesCodeBlock} lines ${isPending ? "pending" : "generated"}`;
return (
<span className="text-lightgray inline-flex items-center gap-2">
{showLineCount ? linesGeneratedText : "Generating"}
{!isPending && <Spinner />}
</span>
);
}

View File

@ -11,14 +11,15 @@ import { IdeMessengerContext } from "../../../context/IdeMessenger";
import { useIdeMessengerRequest } from "../../../hooks";
import { useWebviewListener } from "../../../hooks/useWebviewListener";
import { useAppSelector } from "../../../redux/hooks";
import { selectCurrentToolCallApplyState } from "../../../redux/selectors/selectCurrentToolCall";
import { selectApplyStateByStreamId } from "../../../redux/slices/sessionSlice";
import { getFontSize } from "../../../util";
import Spinner from "../../gui/Spinner";
import { isTerminalCodeBlock } from "../utils";
import { ApplyActions } from "./ApplyActions";
import { CopyButton } from "./CopyButton";
import { CreateFileButton } from "./CreateFileButton";
import { FileInfo } from "./FileInfo";
import { GeneratingCodeLoader } from "./GeneratingCodeLoader";
import { InsertButton } from "./InsertButton";
import { RunInTerminalButton } from "./RunInTerminalButton";
@ -49,8 +50,9 @@ export interface StepContainerPreToolbarProps {
codeBlockContent: string;
language: string | null;
relativeFilepath?: string;
isFinalCodeblock: boolean;
itemIndex?: number;
codeBlockIndex: number; // To track which codeblock we are applying
isLastCodeblock: boolean;
codeBlockStreamId: string;
range?: string;
children: any;
@ -62,8 +64,9 @@ export function StepContainerPreToolbar({
codeBlockContent,
language,
relativeFilepath,
isFinalCodeblock,
itemIndex,
codeBlockIndex,
isLastCodeblock,
codeBlockStreamId,
range,
children,
@ -71,6 +74,7 @@ export function StepContainerPreToolbar({
disableManualApply,
}: StepContainerPreToolbarProps) {
const ideMessenger = useContext(IdeMessengerContext);
const history = useAppSelector((state) => state.session.history);
const [isExpanded, setIsExpanded] = useState(expanded ?? true);
const [relativeFilepathUri, setRelativeFilepathUri] = useState<string | null>(
@ -95,6 +99,9 @@ export function StepContainerPreToolbar({
const applyState = useAppSelector((state) =>
selectApplyStateByStreamId(state, codeBlockStreamId),
);
const currentToolCallApplyState = useAppSelector(
selectCurrentToolCallApplyState,
);
/**
* In the case where `relativeFilepath` is defined, this will just be `relativeFilepathUri`.
@ -110,7 +117,12 @@ export function StepContainerPreToolbar({
relativeFilepath && /\.[0-9a-z]+$/i.test(relativeFilepath);
const isStreaming = useAppSelector((store) => store.session.isStreaming);
const isGeneratingCodeBlock = isFinalCodeblock && isStreaming;
const isLastItem = useMemo(() => {
return itemIndex === history.length - 1;
}, [history.length, itemIndex]);
const isGeneratingCodeBlock = isLastItem && isLastCodeblock && isStreaming;
// If we are creating a file, we already render that in the button
// so we don't want to dispaly it twice here
@ -221,6 +233,32 @@ export function StepContainerPreToolbar({
}
const renderActionButtons = () => {
const isPendingToolCall =
currentToolCallApplyState &&
currentToolCallApplyState.streamId === applyState?.streamId &&
currentToolCallApplyState.status === "not-started";
if (isGeneratingCodeBlock || isPendingToolCall) {
const numLines = codeBlockContent.split("\n").length;
const plural = numLines === 1 ? "" : "s";
if (isGeneratingCodeBlock) {
return (
<span className="text-lightgray inline-flex items-center gap-2 text-right">
{!isExpanded ? `${numLines} line${plural}` : "Generating"}{" "}
<div>
<Spinner />
</div>
</span>
);
} else {
return (
<span className="text-lightgray inline-flex items-center gap-2 text-right">
{`${numLines} line${plural} pending`}
</span>
);
}
}
if (isTerminalCodeBlock(language, codeBlockContent)) {
return <RunInTerminalButton command={codeBlockContent} />;
}
@ -281,17 +319,7 @@ export function StepContainerPreToolbar({
</div>
)}
{isGeneratingCodeBlock || applyState?.status === "not-started" ? (
<GeneratingCodeLoader
showLineCount={!isExpanded}
codeBlockContent={codeBlockContent}
isPending={
applyState?.status === "not-started" && !isGeneratingCodeBlock
}
/>
) : (
renderActionButtons()
)}
{renderActionButtons()}
</div>
</ToolbarDiv>

View File

@ -193,11 +193,7 @@ const StyledMarkdownPreview = memo(function StyledMarkdownPreview(
};
}, [props.itemIndex, history, allSymbols]);
const pastFileInfoRef = useUpdatingRef(pastFileInfo);
const isLastItem = useMemo(() => {
return props.itemIndex === history.length - 1;
}, [history.length, props.itemIndex]);
const isLastItemRef = useUpdatingRef(isLastItem);
const itemIndexRef = useUpdatingRef(props.itemIndex);
const codeblockStreamIds = useRef<string[]>([]);
useEffect(() => {
@ -296,8 +292,7 @@ const StyledMarkdownPreview = memo(function StyledMarkdownPreview(
const language = getLanguageFromClassName(className);
const isFinalCodeblock =
preChildProps["data-islastcodeblock"] && isLastItemRef.current;
const isLastCodeblock = preChildProps["data-islastcodeblock"];
if (codeblockStreamIds.current[codeBlockIndex] === undefined) {
codeblockStreamIds.current[codeBlockIndex] =
@ -307,10 +302,11 @@ const StyledMarkdownPreview = memo(function StyledMarkdownPreview(
return (
<StepContainerPreToolbar
codeBlockContent={codeBlockContent}
itemIndex={itemIndexRef.current}
codeBlockIndex={codeBlockIndex}
language={language}
relativeFilepath={relativeFilePath}
isFinalCodeblock={isFinalCodeblock}
isLastCodeblock={isLastCodeblock}
range={range}
codeBlockStreamId={codeblockStreamIds.current[codeBlockIndex]}
expanded={props.expandCodeblocks}

View File

@ -0,0 +1,65 @@
import { headerIsMarkdown } from './headerIsMarkdown';
describe('headerIsMarkdown', () => {
// Test exact match with common Markdown identifiers
it('should identify exact matches with common Markdown identifiers', () => {
expect(headerIsMarkdown('md')).toBe(true);
expect(headerIsMarkdown('markdown')).toBe(true);
expect(headerIsMarkdown('gfm')).toBe(true);
expect(headerIsMarkdown('github-markdown')).toBe(true);
});
// Test identifiers preceded by a space
it('should identify identifiers preceded by a space', () => {
expect(headerIsMarkdown('lang md')).toBe(true);
expect(headerIsMarkdown('something markdown')).toBe(true);
expect(headerIsMarkdown('language gfm')).toBe(true);
expect(headerIsMarkdown('spec github-markdown')).toBe(true);
});
// Test file extensions
it('should identify file names with markdown extensions', () => {
expect(headerIsMarkdown('example.md')).toBe(true);
expect(headerIsMarkdown('document.markdown')).toBe(true);
expect(headerIsMarkdown('readme.gfm')).toBe(true);
});
// Test more complex cases with extensions
it('should identify file names with markdown extensions followed by other text', () => {
expect(headerIsMarkdown('example.md additional text')).toBe(true);
expect(headerIsMarkdown('document.markdown with some description')).toBe(true);
expect(headerIsMarkdown('readme.gfm v2.0')).toBe(true);
});
// Test non-markdown cases
it('should not identify non-markdown headers', () => {
expect(headerIsMarkdown('javascript')).toBe(false);
expect(headerIsMarkdown('typescript')).toBe(false);
expect(headerIsMarkdown('plain')).toBe(false);
expect(headerIsMarkdown('python')).toBe(false);
});
// Test edge cases
it('should handle edge cases correctly', () => {
expect(headerIsMarkdown('')).toBe(false);
expect(headerIsMarkdown('mdx')).toBe(false); // Similar but not exactly "md"
expect(headerIsMarkdown('readme md')).toBe(true);
expect(headerIsMarkdown('md.js')).toBe(false); // "md" is not the extension
});
// Test case sensitivity
it('should respect case sensitivity', () => {
expect(headerIsMarkdown('MD')).toBe(false);
expect(headerIsMarkdown('MARKDOWN')).toBe(false);
expect(headerIsMarkdown('example.MD')).toBe(false);
expect(headerIsMarkdown('lang MD')).toBe(false);
});
// Test with special characters and spacing
it('should handle special characters and spacing correctly', () => {
expect(headerIsMarkdown(' md')).toBe(true); // Space before "md"
expect(headerIsMarkdown('md ')).toBe(true); // Space after "md"
expect(headerIsMarkdown('hello-md')).toBe(false); // "md" with hyphen prefix
expect(headerIsMarkdown('markdown:')).toBe(false); // "markdown" with suffix
});
});

View File

@ -0,0 +1,26 @@
/**
 * Determines if a given header string represents Markdown content.
 *
 * A header counts as Markdown when:
 * - it is exactly one of the common Markdown identifiers
 *   (md, markdown, gfm, github-markdown),
 * - any whitespace-separated token after the first equals one of those
 *   identifiers (e.g. "lang md"), or
 * - the first token has a file extension of md, markdown, or gfm
 *   (e.g. "example.md additional text").
 *
 * Matching is case-sensitive: "MD" and "example.MD" are not Markdown.
 *
 * @param header - The string to check for Markdown indicators
 * @returns True if the header represents Markdown content, false otherwise
 */
export function headerIsMarkdown(header: string): boolean {
  const markdownIdentifiers = new Set([
    "md",
    "markdown",
    "gfm",
    "github-markdown",
  ]);
  const markdownExtensions = new Set(["md", "markdown", "gfm"]);

  // Exact match with a known identifier.
  if (markdownIdentifiers.has(header)) {
    return true;
  }

  const tokens = header.split(" ");

  // An identifier appearing as a whole token after the first, e.g. "lang md".
  // Checking whole tokens (rather than substring `includes(" md")`) avoids
  // false positives such as "lang mdx".
  if (tokens.slice(1).some((token) => markdownIdentifiers.has(token))) {
    return true;
  }

  // First token's file extension is a markdown extension, e.g. "readme.md".
  // Computed once instead of three separate split/pop chains.
  const ext = tokens[0]?.split(".").pop();
  return ext !== undefined && markdownExtensions.has(ext);
}

View File

@ -0,0 +1,105 @@
import { patchNestedMarkdown } from './patchNestedMarkdown';
// Tests for patchNestedMarkdown, which rewrites the fences of markdown
// codeblocks from ``` to ~~~ so that codeblocks nested inside them render.
describe('patchNestedMarkdown', () => {
// --- Pass-through cases: non-markdown blocks must be left untouched ---
it('should return unchanged content when no markdown codeblocks are present', () => {
const source = 'Regular text\n```javascript\nconsole.log("hello");\n```';
expect(patchNestedMarkdown(source)).toBe(source);
});
// --- Basic markdown fences and nesting ---
it('should replace backticks with tildes for markdown codeblocks', () => {
const source = '```markdown\n# Header\nSome text\n```';
const expected = '~~~markdown\n# Header\nSome text\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
it('should handle nested codeblocks within markdown blocks', () => {
const source = '```markdown\n# Example\n```js\nconsole.log("nested");\n```\n```';
const expected = '~~~markdown\n# Example\n```js\nconsole.log("nested");\n```\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
// --- Markdown detection via file extension in the fence header ---
it('should handle .md file extension', () => {
const source = '```test.md\nContent\n```';
const expected = '~~~test.md\nContent\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
it('should handle multiple levels of nesting', () => {
const source = '```markdown\n# Doc\n```js\nlet x = "```nested```";\n```\n```';
const expected = '~~~markdown\n# Doc\n```js\nlet x = "```nested```";\n```\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
// --- Markdown detection via language identifiers in the fence header ---
it('should handle blocks that start with md', () => {
const source = '```md\n# Header\nSome text\n```';
const expected = '~~~md\n# Header\nSome text\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
it('should handle blocks with language specifier followed by md', () => {
const source = '```lang md\n# Header\nSome text\n```';
const expected = '~~~lang md\n# Header\nSome text\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
it('should handle blocks with language specifier followed by markdown', () => {
const source = '```lang markdown\n# Header\nSome text\n```';
const expected = '~~~lang markdown\n# Header\nSome text\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
// --- Negative cases: headers that must NOT be treated as markdown ---
it('should not replace backticks for non-markdown file extensions', () => {
const source = '```test.js\nContent\n```';
expect(patchNestedMarkdown(source)).toBe(source);
});
it('should check the file extension branch when extension is not md/markdown/gfm', () => {
// This tests the extension check branch with an unrecognized extension
const source = '```test.txt\nContent with md keyword\n```';
expect(patchNestedMarkdown(source)).toBe(source);
});
it('should handle empty file name in code block header', () => {
// This covers the branch where file is empty or undefined
const source = '``` \nSome content\n```';
expect(patchNestedMarkdown(source)).toBe(source);
});
it('should handle file names with no extension', () => {
// This covers the branch where ext might be undefined
const source = '```filename\nContent\n```';
expect(patchNestedMarkdown(source)).toBe(source);
});
it('should correctly identify .markdown extension', () => {
// This specifically tests the ext === "markdown" condition in the extension check
const source = '```example.markdown\n# Some markdown content\n```';
const expected = '~~~example.markdown\n# Some markdown content\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
// GitHub-specific tests
it('should handle gfm language identifier', () => {
const source = '```gfm\n# Header\nSome text\n```';
const expected = '~~~gfm\n# Header\nSome text\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
it('should handle github-markdown language identifier', () => {
const source = '```github-markdown\n# Header\nSome text\n```';
const expected = '~~~github-markdown\n# Header\nSome text\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
it('should handle language specifier followed by gfm', () => {
const source = '```lang gfm\n# Header\nSome text\n```';
const expected = '~~~lang gfm\n# Header\nSome text\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
it('should handle .gfm file extension', () => {
const source = '```example.gfm\n# Some GitHub markdown content\n```';
const expected = '~~~example.gfm\n# Some GitHub markdown content\n~~~';
expect(patchNestedMarkdown(source)).toBe(expected);
});
});

View File

@ -1,47 +1,52 @@
/*
This is a patch for outputing markdown code that contains codeblocks
It notices markdown blocks, keeps track of when that specific block is closed,
It notices markdown blocks (including GitHub-specific variants),
keeps track of when that specific block is closed,
and uses ~~~ instead of ``` for that block
Note, this was benchmarked at sub-millisecond
// TODO support github-specific markdown as well, edge case
*/
import { headerIsMarkdown } from './headerIsMarkdown';
export const patchNestedMarkdown = (source: string): string => {
if (!source.match(/```(\w+\.(md|markdown))/)) return source; // For performance
// const start = Date.now();
// Early return if no markdown codeblock pattern is found (including GitHub variants)
if (!source.match(/```(\w*|.*)(md|markdown|gfm|github-markdown)/)) return source;
let nestCount = 0;
const lines = source.split("\n");
const trimmedLines = lines.map((l) => l.trim());
for (let i = 0; i < trimmedLines.length; i++) {
const line = trimmedLines[i];
if (nestCount) {
if (nestCount > 0) {
// Inside a markdown block
if (line.match(/^`+$/)) {
// Ending a block
if (nestCount === 1) lines[i] = "~~~"; // End of markdown block
// Ending a block with just backticks (```)
nestCount--;
if (nestCount === 0) {
lines[i] = "~~~"; // End of markdown block
}
} else if (line.startsWith("```")) {
// Going into a nested codeblock
nestCount++;
}
} else {
// Enter the markdown block, start tracking nesting
// Not inside a markdown block yet
if (line.startsWith("```")) {
const header = line.replaceAll("`", "");
const file = header.split(" ")[0];
if (file) {
const ext = file.split(".").at(-1);
if (ext === "md" || ext === "markdown") {
nestCount = 1;
lines[i] = lines[i].replaceAll("`", "~"); // Replace backticks with tildes
}
// Check if this is a markdown codeblock using a consolidated approach (including GitHub-specific variants)
const isMarkdown = headerIsMarkdown(header);
if (isMarkdown) {
nestCount = 1;
lines[i] = lines[i].replaceAll("`", "~");
}
}
}
}
const out = lines.join("\n");
// console.log(`patched in ${Date.now() - start}ms`);
return out;
return lines.join("\n");
};

View File

@ -6,15 +6,21 @@ interface ToggleProps {
children: React.ReactNode;
title: React.ReactNode;
icon?: ComponentType<React.SVGProps<SVGSVGElement>>;
testId?: string;
}
function ToggleDiv({ children, title, icon: Icon }: ToggleProps) {
function ToggleDiv({
children,
title,
icon: Icon,
testId = "context-items-peek",
}: ToggleProps) {
const [open, setOpen] = useState(false);
const [isHovered, setIsHovered] = useState(false);
return (
<div
className={`pl-2 pt-2`}
className={`pl-2`}
style={{
backgroundColor: vscBackground,
}}
@ -24,7 +30,7 @@ function ToggleDiv({ children, title, icon: Icon }: ToggleProps) {
onClick={() => setOpen((prev) => !prev)}
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
data-testid="context-items-peek"
data-testid={testId}
>
<div className="relative mr-1 h-4 w-4">
{Icon && !isHovered && !open ? (
@ -44,10 +50,7 @@ function ToggleDiv({ children, title, icon: Icon }: ToggleProps) {
</>
)}
</div>
<span
className="ml-1 text-xs text-gray-400 transition-colors duration-200"
data-testid="toggle-div-title"
>
<span className="ml-1 text-xs text-gray-400 transition-colors duration-200">
{title}
</span>
</div>

View File

@ -1,4 +1,4 @@
import React from "react";
import React, { useState } from "react";
interface NumberInputProps {
value: number;
@ -13,25 +13,51 @@ const NumberInput: React.FC<NumberInputProps> = ({
max,
min,
}) => {
const [inputValue, setInputValue] = useState(value.toString());
const handleIncrement = () => {
if (value < max) {
onChange(value + 1);
const newValue = value + 1;
onChange(newValue);
setInputValue(newValue.toString());
}
};
const handleDecrement = () => {
if (value > min) {
onChange(value - 1);
const newValue = value - 1;
onChange(newValue);
setInputValue(newValue.toString());
}
};
const handleInputChange = (e: React.ChangeEvent<HTMLInputElement>) => {
const newInputValue = e.target.value;
setInputValue(newInputValue);
// Only update the actual value if it's a valid number
const numValue = parseInt(newInputValue, 10);
if (!isNaN(numValue)) {
// Apply min/max constraints
const constrainedValue = Math.min(Math.max(numValue, min), max);
onChange(constrainedValue);
}
};
const handleBlur = () => {
// When input loses focus, ensure the displayed value matches the actual value
// This handles cases where the user entered an invalid value
setInputValue(value.toString());
};
return (
<div className="border-vsc-input-border bg-vsc-input-background flex flex-row overflow-hidden rounded-md border border-solid">
<input
type="text"
value={value}
readOnly
className="text-vsc-foreground max-w-7 border-none bg-inherit pr-1.5 text-right outline-none ring-0 focus:border-none focus:outline-none focus:ring-0"
value={inputValue}
onChange={handleInputChange}
onBlur={handleBlur}
className="text-vsc-foreground max-w-9 border-none bg-inherit pr-1.5 text-right outline-none ring-0 focus:border-none focus:outline-none focus:ring-0"
style={{
appearance: "none",
WebkitAppearance: "none",

View File

@ -1,11 +1,12 @@
import { Editor, JSONContent } from "@tiptap/react";
import { ContextItemWithId, InputModifiers } from "core";
import { ContextItemWithId, InputModifiers, RuleWithSource } from "core";
import { useMemo } from "react";
import styled, { keyframes } from "styled-components";
import { defaultBorderRadius, vscBackground } from "..";
import { useAppSelector } from "../../redux/hooks";
import { selectSlashCommandComboBoxInputs } from "../../redux/selectors";
import { ContextItemsPeek } from "./belowMainInput/ContextItemsPeek";
import { RulesPeek } from "./belowMainInput/RulesPeek";
import { ToolbarOptions } from "./InputToolbar";
import { Lump } from "./Lump";
import { TipTapEditor } from "./TipTapEditor";
@ -20,6 +21,7 @@ interface ContinueInputBoxProps {
) => void;
editorState?: JSONContent;
contextItems?: ContextItemWithId[];
appliedRules?: RuleWithSource[];
hidden?: boolean;
inputId: string; // used to keep track of things per input in redux
}
@ -116,6 +118,8 @@ function ContinueInputBox(props: ContinueInputBoxProps) {
}
: {};
const { appliedRules = [], contextItems = [] } = props;
return (
<div
className={`${props.hidden ? "hidden" : ""}`}
@ -143,10 +147,15 @@ function ContinueInputBox(props: ContinueInputBoxProps) {
/>
</GradientBorder>
</div>
<ContextItemsPeek
contextItems={props.contextItems}
isCurrentContextPeek={props.isLastUserInput}
/>
{(appliedRules.length > 0 || contextItems.length > 0) && (
<div className="mt-2 flex flex-col">
<RulesPeek appliedRules={props.appliedRules} />
<ContextItemsPeek
contextItems={props.contextItems}
isCurrentContextPeek={props.isLastUserInput}
/>
</div>
)}
</div>
);
}

View File

@ -4,6 +4,8 @@ import {
} from "@heroicons/react/24/outline";
import { BookmarkIcon as BookmarkSolid } from "@heroicons/react/24/solid";
import { SlashCommandDescription } from "core";
import { useContext } from "react";
import { IdeMessengerContext } from "../../../../context/IdeMessenger";
import { useBookmarkedSlashCommands } from "../../../../hooks/useBookmarkedSlashCommands";
import { useAppSelector } from "../../../../redux/hooks";
import { fontSize } from "../../../../util";
@ -89,14 +91,16 @@ function PromptRow({
*/
export function PromptsSection() {
const { isCommandBookmarked, toggleBookmark } = useBookmarkedSlashCommands();
const ideMessenger = useContext(IdeMessengerContext);
const slashCommands = useAppSelector(
(state) => state.config.config.slashCommands ?? [],
);
const handleEdit = (prompt: any) => {
// Handle edit action here
console.log("Editing prompt:", prompt);
const handleEdit = (_prompt: SlashCommandDescription) => {
ideMessenger.post("config/openProfile", {
profileId: undefined,
});
};
const sortedCommands = [...slashCommands].sort((a, b) => {

View File

@ -0,0 +1,122 @@
import { DocumentTextIcon, GlobeAltIcon } from "@heroicons/react/24/outline";
import { RuleSource, RuleWithSource } from "core";
import { ComponentType, useMemo, useState } from "react";
import ToggleDiv from "../../ToggleDiv";
interface RulesPeekProps {
appliedRules?: RuleWithSource[];
icon?: ComponentType<React.SVGProps<SVGSVGElement>>;
}
interface RulesPeekItemProps {
rule: RuleWithSource;
}
// Convert technical source identifiers to user-friendly display text.
// Unknown sources fall back to the raw identifier.
const SOURCE_LABELS: Partial<Record<RuleSource, string>> = {
  "default-chat": "Default Chat",
  "default-agent": "Default Agent",
  "model-chat-options": "Model Chat Options",
  "model-agent-options": "Model Agent Options",
  "rules-block": "Rules Block",
  "json-systemMessage": "System Message",
  ".continuerules": "Project Rules",
};

const getSourceLabel = (source: RuleSource): string =>
  SOURCE_LABELS[source] ?? source;
// Renders one applied rule: an icon, name, glob pattern (or "Always applied"),
// the rule text (truncated past maxRuleLength, click to expand), and its source.
export function RulesPeekItem({ rule }: RulesPeekItemProps) {
// A rule with no glob patterns is global — the UI shows it as "Always applied"
const isGlobal = !rule.globs;
const [expanded, setExpanded] = useState(false);
// Define maximum length for rule text display
const maxRuleLength = 100;
const isRuleLong = rule.rule.length > maxRuleLength;
// Get the displayed rule text based on expanded state
const displayedRule =
isRuleLong && !expanded
? `${rule.rule.slice(0, maxRuleLength)}...`
: rule.rule;
// Only rules longer than maxRuleLength toggle between truncated and full text
const toggleExpand = () => {
if (isRuleLong) {
setExpanded(!expanded);
}
};
return (
<div
className={`group mr-2 flex flex-col overflow-hidden rounded px-1.5 py-1 text-xs hover:bg-white/10 ${isRuleLong ? "cursor-pointer hover:text-gray-200" : ""}`}
data-testid="rules-peek-item"
onClick={toggleExpand}
>
<div className="flex w-full items-center">
{isGlobal ? (
<GlobeAltIcon className="mr-2 h-4 w-4 flex-shrink-0 text-gray-400" />
) : (
<DocumentTextIcon className="mr-2 h-4 w-4 flex-shrink-0 text-gray-400" />
)}
<div className="flex min-w-0 flex-1 gap-2 text-xs">
<div className="max-w-[50%] flex-shrink-0 truncate font-medium">
{rule.name || "Assistant rule"}
</div>
<div className="min-w-0 flex-1 overflow-hidden truncate whitespace-nowrap text-xs text-gray-500">
{isGlobal
? "Always applied"
: `Pattern: ${typeof rule.globs === "string" ? rule.globs : Array.isArray(rule.globs) ? rule.globs.join(", ") : ""}`}
</div>
</div>
</div>
<div
className={`mt-1 whitespace-pre-line pl-6 pr-2 text-xs italic text-gray-300`}
title={
isRuleLong ? (expanded ? "Click to collapse" : "Click to expand") : ""
}
>
{displayedRule}
{isRuleLong && (
<span className="ml-1 text-gray-400 opacity-0 transition-opacity group-hover:opacity-100">
{expanded ? "(collapse)" : "(expand)"}
</span>
)}
</div>
<div className="mt-1 pl-6 pr-2 text-xs text-gray-500">
Source: {getSourceLabel(rule.source)}
</div>
</div>
);
}
// Collapsible summary of the rules applied to a message; renders nothing
// when no rules were applied.
export function RulesPeek({ appliedRules, icon }: RulesPeekProps) {
  // Keep the list identity stable across renders with the same props.
  const ruleList = useMemo(() => appliedRules ?? [], [appliedRules]);

  if (!ruleList || ruleList.length === 0) {
    return null;
  }

  const title = `${ruleList.length} rule${ruleList.length > 1 ? "s" : ""}`;

  return (
    <ToggleDiv icon={icon} title={title} testId="rules-peek">
      {ruleList.map((rule, idx) => (
        <RulesPeekItem key={`rule-${idx}`} rule={rule} />
      ))}
    </ToggleDiv>
  );
}

View File

@ -1,9 +1,11 @@
import { IDE, PromptLog } from "core";
import {
FromIdeProtocol,
FromWebviewProtocol,
ToCoreProtocol,
ToWebviewProtocol,
} from "core/protocol";
import { Message } from "core/protocol/messenger";
import { MessageIde } from "core/protocol/messenger/messageIde";
import {
GeneratorReturnType,
@ -60,13 +62,48 @@ async function defaultMockHandleMessage<T extends keyof FromWebviewProtocol>(
export class MockIdeMessenger implements IIdeMessenger {
ide: IDE;
private messageHandlers: Map<
keyof FromIdeProtocol,
Array<(data: any) => void>
> = new Map();
constructor() {
this.ide = new MessageIde(
(messageType, data) => {
throw new Error("Not implemented");
},
(messageType, callback) => {},
(messageType, callback) => {
// Store the callback in our handlers map
if (!this.messageHandlers.has(messageType)) {
this.messageHandlers.set(messageType, []);
}
this.messageHandlers.get(messageType)?.push(callback);
},
);
}
/**
 * Simulates a message being sent from the IDE to the webview.
 * @param messageType The type of message to send
 * @param data The data to send with the message
 */
mockMessageToWebview<T extends keyof ToWebviewProtocol>(
messageType: T,
data: ToWebviewProtocol[T][0],
): void {
// Unique id so individual mock messages are distinguishable
const messageId = `mock-${Date.now()}-${Math.random().toString(36).substring(2)}`;

// Shape must match what the useWebviewListener hook expects
const payload: Message<ToWebviewProtocol[T][0]> = {
messageType,
data,
messageId,
};

// Fire a window "message" event; the webview's listener picks it up
const event = new MessageEvent("message", {
data: payload,
origin: window.location.origin,
});
window.dispatchEvent(event);
}
@ -80,7 +117,6 @@ export class MockIdeMessenger implements IIdeMessenger {
content: "This is a test",
},
];
return undefined;
}

View File

@ -11,7 +11,7 @@ interface KeyboardShortcutProps {
function KeyboardShortcut(props: KeyboardShortcutProps) {
return (
<div
className={`flex flex-col items-start p-2 py-2 sm:flex-row sm:items-center ${props.isEven ? "bg-list-active" : ""}`}
className={`flex flex-col items-start p-2 py-2 sm:flex-row sm:items-center ${props.isEven ? "" : "bg-table-odd-rows"}`}
>
<div className="w-full flex-grow pb-1 pr-4 sm:w-auto sm:pb-0">
<span className="block break-words text-xs">{props.description}:</span>

View File

@ -77,6 +77,8 @@ export function UserSettingsForm() {
// const codeBlockToolbarPosition = config.ui?.codeBlockToolbarPosition ?? "top";
const useAutocompleteMultilineCompletions =
config.tabAutocompleteOptions?.multilineCompletions ?? "auto";
const modelTimeout = config.tabAutocompleteOptions?.modelTimeout ?? 150;
const debounceDelay = config.tabAutocompleteOptions?.debounceDelay ?? 250;
const fontSize = getFontSize();
const cancelChangeDisableAutocomplete = () => {
@ -99,7 +101,7 @@ export function UserSettingsForm() {
}, [ideMessenger]);
return (
<div className="flex flex-col pt-3">
<div className="flex flex-col">
{/* {selectedProfile && isLocalProfile(selectedProfile) ? (
<div className="flex items-center justify-center">
<SecondaryButton
@ -232,9 +234,8 @@ export function UserSettingsForm() {
max={50}
/>
</label>
<label className="flex items-center justify-between gap-3">
<span className="line-clamp-1 text-left">
<span className="lines lines-1 text-left">
Multiline Autocompletions
</span>
<Select
@ -253,7 +254,32 @@ export function UserSettingsForm() {
<option value="never">Never</option>
</Select>
</label>
<label className="flex items-center justify-between gap-3">
<span className="text-left">Model Timeout (ms)</span>
<NumberInput
value={modelTimeout}
onChange={(val) =>
handleUpdate({
modelTimeout: val,
})
}
min={100}
max={5000}
/>
</label>
<label className="flex items-center justify-between gap-3">
<span className="text-left">Model Debounce (ms)</span>
<NumberInput
value={debounceDelay}
onChange={(val) =>
handleUpdate({
debounceDelay: val,
})
}
min={0}
max={2500}
/>
</label>
<form
className="flex flex-col gap-1"
onSubmit={(e) => {

View File

@ -1,5 +1,5 @@
import {
AcademicCapIcon,
BoltIcon,
CircleStackIcon,
Cog6ToothIcon,
QuestionMarkCircleIcon,
@ -52,7 +52,7 @@ function ConfigPage() {
id: "shortcuts",
label: "Shortcuts",
component: <KeyboardShortcuts />,
icon: <AcademicCapIcon className="xs:h-4 xs:w-4 h-3 w-3 flex-shrink-0" />,
icon: <BoltIcon className="xs:h-4 xs:w-4 h-3 w-3 flex-shrink-0" />,
},
];
@ -67,14 +67,14 @@ function ConfigPage() {
/>
{/* Tab Headers */}
<div className="grid grid-cols-2 border-0 border-b-[1px] border-solid border-b-zinc-700 p-0.5 sm:flex sm:justify-center md:gap-x-2">
<div className="bg-vsc-input-background grid cursor-pointer grid-cols-2 border-0 border-b-[1px] border-solid border-b-zinc-700 p-0.5 sm:flex sm:justify-center md:gap-x-2">
{tabs.map((tab) => (
<div
style={{
fontSize: fontSize(-2),
}}
key={tab.id}
className={`hover:bg-vsc-input-background flex cursor-pointer items-center justify-center gap-1.5 rounded-md px-2 py-2 ${
className={`flex cursor-pointer items-center justify-center gap-1.5 rounded-md px-2 py-2 hover:brightness-125 ${
activeTab === tab.id ? "" : "text-gray-400"
}`}
onClick={() => setActiveTab(tab.id)}

View File

@ -59,4 +59,51 @@ describe("Chat page test", () => {
});
expect(await screen.findByText("Hello, world!")).toBeInTheDocument();
});
// Drives the apply-state lifecycle (streaming -> done -> closed) through the
// mock IDE messenger and checks the Accept/Reject UI appears and disappears.
it("should handle apply updates and display the accept / reject all buttons", async () => {
const { ideMessenger } = await renderWithProviders(<Chat />);
// Use queryByText which returns null when element isn't found
const acceptAllButton = screen.queryByText("Accept All");
const rejectAllButton = screen.queryByText("Reject All");
// Assert that the buttons don't exist
expect(acceptAllButton).not.toBeInTheDocument();
expect(rejectAllButton).not.toBeInTheDocument();
// Send several streaming updates for the same streamId, then mark it done
for (let i = 0; i < 5; i++) {
ideMessenger.mockMessageToWebview("updateApplyState", {
status: "streaming",
streamId: `12345`,
});
}
ideMessenger.mockMessageToWebview("updateApplyState", {
status: "done",
streamId: "12345",
});
// Wait for the buttons to appear
await waitFor(() => {
expect(
screen.getByTestId("accept-reject-all-buttons"),
).toBeInTheDocument();
});
// IDE sends back message that it is done
ideMessenger.mockMessageToWebview("updateApplyState", {
status: "closed",
streamId: "12345",
});
// Wait for the buttons to disappear
await waitFor(() => {
expect(
screen.queryByTestId("edit-accept-button"),
).not.toBeInTheDocument();
expect(
screen.queryByTestId("edit-reject-button"),
).not.toBeInTheDocument();
});
});
});

View File

@ -320,6 +320,7 @@ export function Chat() {
isMainInput={false}
editorState={item.editorState}
contextItems={item.contextItems}
appliedRules={item.appliedRules}
inputId={item.message.id}
/>
</>

View File

@ -8,6 +8,10 @@ interface CreateFileToolCallProps {
}
export function CreateFile(props: CreateFileToolCallProps) {
if (!props.fileContents) {
return null;
}
const src = `\`\`\`${getMarkdownLanguageTagForFile(props.relativeFilepath ?? "output.txt")} ${props.relativeFilepath}\n${props.fileContents ?? ""}\n\`\`\``;
return props.relativeFilepath ? (

View File

@ -16,13 +16,12 @@ type EditToolCallProps = {
};
export function EditFile(props: EditToolCallProps) {
const src = `\`\`\`${getMarkdownLanguageTagForFile(props.relativeFilePath ?? "test.txt")} ${props.relativeFilePath}\n${props.changes ?? ""}\n\`\`\``;
const dispatch = useAppDispatch();
const isStreaming = useAppSelector((state) => state.session.isStreaming);
const applyState = useAppSelector((state) =>
selectApplyStateByToolCallId(state, props.toolCallId),
);
useEffect(() => {
if (!applyState) {
dispatch(
@ -35,10 +34,12 @@ export function EditFile(props: EditToolCallProps) {
}
}, [applyState, props.toolCallId]);
if (!props.relativeFilePath) {
if (!props.relativeFilePath || !props.changes) {
return null;
}
const src = `\`\`\`${getMarkdownLanguageTagForFile(props.relativeFilePath ?? "test.txt")} ${props.relativeFilePath}\n${props.changes}\n\`\`\``;
return (
<StyledMarkdownPreview
isRenderingInStepContainer

View File

@ -27,8 +27,8 @@ function FunctionSpecificToolCallDiv({
case BuiltInToolNames.EditExistingFile:
return (
<EditFile
relativeFilePath={args.filepath}
changes={args.changes}
relativeFilePath={args.filepath ?? ""}
changes={args.changes ?? ""}
toolCallId={toolCall.id}
historyIndex={historyIndex}
/>

View File

@ -61,7 +61,7 @@ export function SimpleToolCallUI({
</div>
<span
className="ml-1 text-xs text-gray-400 transition-colors duration-200"
data-testid="toggle-div-title"
data-testid="tool-call-title"
>
<ToolCallStatusMessage tool={tool} toolCallState={toolCallState} />
</span>

View File

@ -15,6 +15,7 @@ import {
FileSymbolMap,
MessageModes,
PromptLog,
RuleWithSource,
Session,
SessionMetadata,
ToolCallDelta,
@ -247,6 +248,19 @@ export const sessionSlice = createSlice({
...payload.contextItems,
];
},
// Records which rules were applied to the history item at `index`
// (used by the UI's RulesPeek); no-op when the index is out of range.
setAppliedRulesAtIndex: (
state,
{
payload,
}: PayloadAction<{
index: number;
appliedRules: RuleWithSource[];
}>,
) => {
if (state.history[payload.index]) {
state.history[payload.index].appliedRules = payload.appliedRules;
}
},
setInactive: (state) => {
const curMessage = state.history.at(-1);
@ -698,6 +712,7 @@ export const {
updateFileSymbols,
setContextItemsAtIndex,
addContextItemsAtIndex,
setAppliedRulesAtIndex,
setInactive,
streamUpdate,
newSession,

View File

@ -1,12 +1,14 @@
import { createAsyncThunk, unwrapResult } from "@reduxjs/toolkit";
import { JSONContent } from "@tiptap/core";
import { InputModifiers } from "core";
import { InputModifiers, ToolResultChatMessage, UserChatMessage } from "core";
import { constructMessages } from "core/llm/constructMessages";
import { getApplicableRules } from "core/llm/rules/getSystemMessageWithRules";
import posthog from "posthog-js";
import { v4 as uuidv4 } from "uuid";
import { getBaseSystemMessage } from "../../util";
import { selectSelectedChatModel } from "../slices/configSlice";
import {
setAppliedRulesAtIndex,
submitEditorAndInitAtIndex,
updateHistoryItemAtIndex,
} from "../slices/sessionSlice";
@ -84,17 +86,41 @@ export const streamResponseThunk = createAsyncThunk<
}),
);
// Construct messages from updated history
// Get updated history after the update
const updatedHistory = getState().session.history;
const messageMode = getState().session.mode
const baseChatOrAgentSystemMessage = getBaseSystemMessage(selectedChatModel, messageMode)
// Determine which rules apply to this message
const userMsg = updatedHistory[inputIndex].message;
const rules = getState().config.config.rules;
// Calculate applicable rules once
// We need to check the message type to match what getApplicableRules expects
const applicableRules = getApplicableRules(
userMsg.role === "user" || userMsg.role === "tool"
? (userMsg as UserChatMessage | ToolResultChatMessage)
: undefined,
rules,
);
// Store in history for UI display
dispatch(
setAppliedRulesAtIndex({
index: inputIndex,
appliedRules: applicableRules,
}),
);
const messageMode = getState().session.mode;
const baseChatOrAgentSystemMessage = getBaseSystemMessage(
selectedChatModel,
messageMode,
);
const messages = constructMessages(
messageMode,
[...updatedHistory],
baseChatOrAgentSystemMessage,
state.config.config.rules,
applicableRules,
);
posthog.capture("step run", {

View File

@ -72,6 +72,7 @@ export async function renderWithProviders(
return {
user,
store,
ideMessenger,
...rendered!,
};
}

View File

@ -43,6 +43,7 @@ module.exports = {
// TODO make it all non-IDE-specific naming
"find-match-selected":
"var(--vscode-editor-findMatchHighlightBackground, rgba(255, 223, 0))",
"table-odd-rows":"var(--vscode-tree-tableOddRowsBackground, #1bbe84)",
"list-active": "var(--vscode-list-activeSelectionBackground, #1bbe84)",
"list-active-foreground":
"var(--vscode-quickInputList-focusForeground, var(--vscode-editor-foreground))",

View File

@ -3,6 +3,7 @@ import * as http from "node:http";
import { AddressInfo } from "node:net";
import * as os from "node:os";
import * as path from "node:path";
import { pathToFileURL } from "url";
import { PackageIdentifier } from "./interfaces/slugs.js";
import { RegistryClient } from "./registryClient.js";
@ -134,6 +135,7 @@ describe("RegistryClient", () => {
describe("getContentFromFilePath", () => {
let absoluteFilePath: string;
let fileUrl: string;
let relativeFilePath: string;
beforeEach(() => {
@ -141,6 +143,11 @@ describe("RegistryClient", () => {
absoluteFilePath = path.join(tempDir, "absolute-path.yaml");
fs.writeFileSync(absoluteFilePath, "absolute file content", "utf8");
const urlFilePath = path.join(tempDir, "file-url-path.yaml");
fs.writeFileSync(urlFilePath, "file:// file content", "utf8");
const url = pathToFileURL(urlFilePath);
fileUrl = url.toString();
// Create a subdirectory and file in the temp directory
const subDir = path.join(tempDir, "sub");
fs.mkdirSync(subDir, { recursive: true });
@ -160,6 +167,14 @@ describe("RegistryClient", () => {
expect(result).toBe("absolute file content");
});
it("should read from local file url directly", () => {
const client = new RegistryClient();
const result = (client as any).getContentFromFilePath(fileUrl);
expect(result).toBe("file:// file content");
});
it("should use rootPath for relative paths when provided", () => {
const client = new RegistryClient({ rootPath: tempDir });

View File

@ -38,11 +38,9 @@ export class RegistryClient implements Registry {
private getContentFromFilePath(filepath: string): string {
if (filepath.startsWith("file://")) {
const pathWithoutProtocol = filepath.slice(7);
// For Windows file:///C:/path/to/file, we need to handle it properly
// On other systems, we might have file:///path/to/file
return fs.readFileSync(decodeURIComponent(pathWithoutProtocol), "utf8");
return fs.readFileSync(new URL(filepath), "utf8");
} else if (path.isAbsolute(filepath)) {
return fs.readFileSync(filepath, "utf8");
} else if (this.rootPath) {

View File

@ -49,18 +49,30 @@ export const completionOptionsSchema = z.object({
n: z.number().optional(),
reasoning: z.boolean().optional(),
reasoningBudgetTokens: z.number().optional(),
promptCaching: z.boolean().optional(),
});
export type CompletionOptions = z.infer<typeof completionOptionsSchema>;
// Supported embedding task identifiers.
export const embeddingTasksSchema = z.union([
z.literal("chunk"),
z.literal("query")
]);
export type EmbeddingTasks = z.infer<typeof embeddingTasksSchema>;
// Map from embedding task to a prefix string
// (presumably prepended to inputs before embedding — confirm with provider usage).
export const embeddingPrefixesSchema = z.record(embeddingTasksSchema, z.string());
export type EmbeddingPrefixes = z.infer<typeof embeddingPrefixesSchema>;
// Optional flags controlling caching of the system message and the conversation.
export const cacheBehaviorSchema = z.object({
cacheSystemMessage: z.boolean().optional(),
cacheConversation: z.boolean().optional(),
});
export type CacheBehavior = z.infer<typeof cacheBehaviorSchema>;
// Options for embedding requests: chunk/batch size limits and per-task prefixes.
export const embedOptionsSchema = z.object({
maxChunkSize: z.number().optional(),
maxBatchSize: z.number().optional(),
embeddingPrefixes: embeddingPrefixesSchema.optional(),
});
export type EmbedOptions = z.infer<typeof embedOptionsSchema>;

View File

@ -76,6 +76,12 @@ if (($null -eq $node)) {
Write-Host "`nInstalling root-level dependencies..." -ForegroundColor White
npm install
Write-Host "`nBuilding config-yaml..." -ForegroundColor White
Push-Location packages/config-yaml
npm install
npm run build
Pop-Location
Write-Host "`nInstalling Core extension dependencies..." -ForegroundColor White
Push-Location core
npm install