parent 2eafdff54b
commit 0ef443cafc

@@ -446,7 +446,7 @@ export class CompletionProvider {
     ) {
       shownGptClaudeWarning = true;
       throw new Error(
-        `Warning: ${llm.model} is not trained for tab-autocomplete, and will result in low-quality suggestions. See the docs to learn more about why: https://docs.continue.dev/walkthroughs/tab-autocomplete#i-want-better-completions-should-i-use-gpt-4`,
+        `Warning: ${llm.model} is not trained for tab-autocomplete, and will result in low-quality suggestions. See the docs to learn more about why: https://docs.continue.dev/features/tab-autocomplete#i-want-better-completions-should-i-use-gpt-4`,
       );
     }

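For orientation: the hunk above only swaps the docs URL inside a warning that fires at most once per session. A minimal sketch of that guard pattern, with the trigger condition assumed rather than taken from the diff:

```typescript
// Sketch only: the real condition and call site live outside this hunk.
let shownGptClaudeWarning = false;

function warnIfChatModel(modelName: string): void {
  // Assumed check; the hunk shows only the flag assignment and the throw.
  if (!shownGptClaudeWarning && /gpt|claude/i.test(modelName)) {
    shownGptClaudeWarning = true; // surface the warning at most once
    throw new Error(
      `Warning: ${modelName} is not trained for tab-autocomplete, and will ` +
        "result in low-quality suggestions. See the docs: " +
        "https://docs.continue.dev/features/tab-autocomplete",
    );
  }
}
```
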
@@ -34,7 +34,7 @@ export async function getPromptFiles(
 const DEFAULT_PROMPT_FILE = `# This is an example ".prompt" file
 # It is used to define and reuse prompts within Continue
 # Continue will automatically create a slash command for each prompt in the .prompts folder
-# To learn more, see the full .prompt file reference: https://docs.continue.dev/walkthroughs/prompt-files
+# To learn more, see the full .prompt file reference: https://docs.continue.dev/features/prompt-files
 temperature: 0.0
 ---
 {{{ diff }}}

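`DEFAULT_PROMPT_FILE` above is a template string; one plausible use, sketched under the assumption that `getPromptFiles` seeds an empty prompts folder with it (the folder and file names here are illustrative, not from the diff):

```typescript
import * as fs from "fs";
import * as path from "path";

// Hypothetical helper: write the default template if no .prompt files exist yet.
function seedDefaultPromptFile(promptsDir: string, defaultContents: string) {
  fs.mkdirSync(promptsDir, { recursive: true });
  const hasPromptFiles = fs
    .readdirSync(promptsDir)
    .some((f) => f.endsWith(".prompt"));
  if (!hasPromptFiles) {
    fs.writeFileSync(path.join(promptsDir, "default.prompt"), defaultContents);
  }
}
```
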
@@ -25,7 +25,7 @@ export async function retrieveContextItemsFromEmbeddings(
     (await extras.ide.getIdeInfo()).ideType === "jetbrains"
   ) {
     throw new Error(
-      "The transformers.js context provider is not currently supported in JetBrains. For now, you can use Ollama to set up local embeddings, or use our 'free-trial' embeddings provider. See here to learn more: https://docs.continue.dev/walkthroughs/codebase-embeddings#embeddings-providers",
+      "The transformers.js context provider is not currently supported in JetBrains. For now, you can use Ollama to set up local embeddings, or use our 'free-trial' embeddings provider. See here to learn more: https://docs.continue.dev/features/codebase-embeddings#embeddings-providers",
     );
   }

@@ -60,7 +60,7 @@ Type '@open' to reference the contents of all of your open files. Set `onlyPinne

 ### Codebase Retrieval

-Type '@codebase' to automatically retrieve the most relevant snippets from your codebase. Read more about indexing and retrieval [here](../walkthroughs/codebase-embeddings.md).
+Type '@codebase' to automatically retrieve the most relevant snippets from your codebase. Read more about indexing and retrieval [here](../features/codebase-embeddings.md).

 ```json
 { "name": "codebase" }

@@ -498,9 +498,7 @@ Continue exposes an API for registering context providers from a 3rd party VSCod

 ```json
 {
-  "extensionDependencies": [
-    "continue.continue"
-  ],
+  "extensionDependencies": ["continue.continue"]
 }
 ```

@@ -513,7 +511,6 @@ Here is an example:
 import * as vscode from "vscode";

 class MyCustomProvider implements IContextProvider {
-
   get description(): ContextProviderDescription {
     return {
       title: "custom",

@@ -525,7 +522,7 @@ class MyCustomProvider implements IContextProvider {

   async getContextItems(
     query: string,
-    extras: ContextProviderExtras
+    extras: ContextProviderExtras,
   ): Promise<ContextItem[]> {
     return [
       {

@@ -537,7 +534,7 @@ class MyCustomProvider implements IContextProvider {
   }

   async loadSubmenuItems(
-    args: LoadSubmenuItemsArgs
+    args: LoadSubmenuItemsArgs,
   ): Promise<ContextSubmenuItem[]> {
     return [];
   }

@@ -554,5 +551,4 @@ const continueApi = continueExt?.exports;

 // register your custom provider
 continueApi?.registerCustomContextProvider(customProvider);
-
 ```

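Pulling the fragments of this docs example together, a self-contained sketch of the registration flow might look like the following. The type names appear in the hunks; the import path, the extra `description` fields beyond `title: "custom"`, and the `ContextItem` field names are assumptions:

```typescript
import * as vscode from "vscode";
// Import path is an assumption; only the type names come from the diff.
import {
  IContextProvider,
  ContextProviderDescription,
  ContextProviderExtras,
  ContextItem,
  LoadSubmenuItemsArgs,
  ContextSubmenuItem,
} from "@continuedev/core";

class MyCustomProvider implements IContextProvider {
  get description(): ContextProviderDescription {
    return {
      title: "custom",
      displayTitle: "Custom", // assumed field
      description: "An example custom provider", // assumed field
      type: "normal", // assumed field
    };
  }

  async getContextItems(
    query: string,
    extras: ContextProviderExtras,
  ): Promise<ContextItem[]> {
    // Return a single static item; these field names are assumptions.
    return [
      {
        name: "Custom",
        description: "Example context item",
        content: "Hello from MyCustomProvider",
      },
    ];
  }

  async loadSubmenuItems(
    args: LoadSubmenuItemsArgs,
  ): Promise<ContextSubmenuItem[]> {
    return [];
  }
}

// Registration path shown in the final hunk of this example.
const customProvider = new MyCustomProvider();
const continueExt = vscode.extensions.getExtension("continue.continue");
const continueApi = continueExt?.exports;
continueApi?.registerCustomContextProvider(customProvider);
```
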
@@ -75,7 +75,7 @@ _You can also use other autocomplete models by adding them to your `config.json`

 ## Embeddings

-We recommend the following embeddings models, which are used for codebase retrieval as described [here](../walkthroughs/codebase-embeddings.md#embeddings-providers)
+We recommend the following embeddings models, which are used for codebase retrieval as described [here](../features/codebase-embeddings.md#embeddings-providers)

 ### Open-source models

@@ -66,7 +66,7 @@ You can use commercial LLMs via APIs using:
 - [Azure OpenAI Service](../reference/Model%20Providers/openai.md)
 - [Google Gemini API](../reference/Model%20Providers/geminiapi.md)
 - [Mistral API](../reference/Model%20Providers/mistral.md)
-- [Voyage AI API](../walkthroughs/codebase-embeddings.md#openai)
+- [Voyage AI API](../features/codebase-embeddings.md#openai)
 - [Cohere API](../reference/Model%20Providers/cohere.md)

 **In addition to selecting providers, you will need to figure out [what models to use](./select-model.md).**

@@ -1,103 +0,0 @@
----
-title: Using Code Llama with Continue
-description: How to use Code Llama with Continue
-keywords: [code llama, meta, togetherai, ollama, replciate, fastchat, msty]
----
-
-# Using Code Llama with Continue
-
-With Continue, you can use Code Llama as a drop-in replacement for GPT-4, either by running locally with Ollama, Msty, or GGML or through Replicate.
-
-If you haven't already installed Continue, you can do that [here](https://marketplace.visualstudio.com/items?itemName=Continue.continue). For more general information on customizing Continue, read [our customization docs](../customization/overview.md).
-
-## TogetherAI
-
-1. Create an account [here](https://api.together.xyz/signup)
-2. Copy your API key that appears on the welcome screen
-3. Update your Continue config file like this:
-
-```json title="~/.continue/config.json"
-{
-  "models": [
-    {
-      "title": "Code Llama",
-      "provider": "together",
-      "model": "togethercomputer/CodeLlama-13b-Instruct",
-      "apiKey": "<API_KEY>"
-    }
-  ]
-}
-```
-
-## Ollama
-
-1. Download Ollama [here](https://ollama.ai/) (it should walk you through the rest of these steps)
-2. Open a terminal and run `ollama run codellama`
-3. Change your Continue config file like this:
-
-```json title="~/.continue/config.json"
-{
-  "models": [
-    {
-      "title": "Code Llama",
-      "provider": "ollama",
-      "model": "codellama-7b"
-    }
-  ]
-}
-```
-
-## Replicate
-
-1. Get your Replicate API key [here](https://replicate.ai/)
-2. Change your Continue config file like this:
-
-```json title="~/.continue/config.json"
-{
-  "models": [
-    {
-      "title": "Code Llama",
-      "provider": "replicate",
-      "model": "codellama-7b",
-      "apiKey": "<API_KEY>"
-    }
-  ]
-}
-```
-
-## FastChat API
-
-1. Setup the FastChat API (https://github.com/lm-sys/FastChat) to use one of the Codellama models on Hugging Face (e.g: codellama/CodeLlama-7b-Instruct-hf).
-2. Start the OpenAI compatible API (ref: https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md).
-3. Change your Continue config file like this:
-
-```json title="~/.continue/config.json"
-{
-  "models": [
-    {
-      "title": "Code Llama",
-      "provider": "openai",
-      "model": "codellama-7b",
-      "apiBase": "http://localhost:8000/v1/"
-    }
-  ]
-}
-```
-
-## Msty
-
-1. Download Msty [here](https://msty.app/) for your platform (Windows, Mac, or Linux)
-2. Open the app and click "Setup Local AI". Optionally, download any model you want with just a click of a button from the Text Module page.
-3. Change your Continue config file like this:
-
-```json title="~/.continue/config.json"
-{
-  "models": [
-    {
-      "title": "Code Llama",
-      "provider": "msty",
-      "model": "codellama:7b"
-    }
-  ]
-}
-```

@@ -0,0 +1,94 @@
+---
+title: Using Llama 3.1 with Continue
+description: How to use Llama 3.1 with Continue
+keywords: [llama, meta, togetherai, ollama, replicate]
+---
+
+# Using Llama 3.1 with Continue
+
+Continue makes it easy to code with the latest open-source models, including the entire Llama 3.1 family of models.
+
+If you haven't already installed Continue, you can do that [here for VS Code](https://marketplace.visualstudio.com/items?itemName=Continue.continue) or [here for JetBrains](https://plugins.jetbrains.com/plugin/22707-continue). For more general information on customizing Continue, read [our customization docs](../customization/overview.md).
+
+Below we share some of the easiest ways to get up and running, depending on your use-case.
+
+## Ollama
+
+Ollama is the fastest way to get up and running with local language models. We recommend trying Llama 3.1 8b, which is impressive for its size and will perform well on most hardware.
+
+1. Download Ollama [here](https://ollama.ai/) (it should walk you through the rest of these steps)
+2. Open a terminal and run `ollama run llama3.1-8b`
+3. Change your Continue config file like this:
+
+```json title="~/.continue/config.json"
+{
+  "models": [
+    {
+      "title": "Llama 3.1 8b",
+      "provider": "ollama",
+      "model": "llama3.1-8b"
+    }
+  ]
+}
+```
+
+## Groq
+
+Groq provides the fastest available inference for open-source language models, including the entire Llama 3.1 family.
+
+1. Obtain an API key [here](https://console.groq.com/keys)
+2. Update your Continue config file like this:
+
+```json title="~/.continue/config.json"
+{
+  "models": [
+    {
+      "title": "Llama 3.1 405b",
+      "provider": "groq",
+      "model": "llama3.1-405b",
+      "apiKey": "<API_KEY>"
+    }
+  ]
+}
+```
+
+## Together AI
+
+Together AI provides fast and reliable inference of open-source models. You'll be able to run the 405b model with good speed.
+
+1. Create an account [here](https://api.together.xyz/signup)
+2. Copy your API key that appears on the welcome screen
+3. Update your Continue config file like this:
+
+```json title="~/.continue/config.json"
+{
+  "models": [
+    {
+      "title": "Llama 3.1 405b",
+      "provider": "together",
+      "model": "llama3.1-405b",
+      "apiKey": "<API_KEY>"
+    }
+  ]
+}
+```
+
+## Replicate
+
+Replicate makes it easy to host and run open-source AI with an API.
+
+1. Get your Replicate API key [here](https://replicate.ai/)
+2. Change your Continue config file like this:
+
+```json title="~/.continue/config.json"
+{
+  "models": [
+    {
+      "title": "Llama 3.1 405b",
+      "provider": "replicate",
+      "model": "llama3.1-405b",
+      "apiKey": "<API_KEY>"
+    }
+  ]
+}
+```

@ -185,6 +185,22 @@ const config = {
|
|||
from: "/model-setup/configuration",
|
||||
to: "/setup/configuration",
|
||||
},
|
||||
{
|
||||
from: "/walkthroughs/codebase-embeddings",
|
||||
to: "/features/codebase-embeddings",
|
||||
},
|
||||
{
|
||||
from: "/walkthroughs/tab-autocomplete",
|
||||
to: "/features/tab-autocomplete",
|
||||
},
|
||||
{
|
||||
from: "/walkthroughs/prompt-files",
|
||||
to: "/features/prompt-files",
|
||||
},
|
||||
{
|
||||
from: "/walkthroughs/quick-actions",
|
||||
to: "/features/quick-actions",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
|
|
|
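These from/to pairs follow the shape used by `@docusaurus/plugin-client-redirects`. A minimal sketch of where they sit in `docusaurus.config.js`, with the plugin wiring assumed from that plugin's standard usage rather than shown in the hunk:

```typescript
// docusaurus.config.js (sketch; only the redirect entries come from the diff)
const config = {
  plugins: [
    [
      "@docusaurus/plugin-client-redirects",
      {
        redirects: [
          {
            from: "/walkthroughs/codebase-embeddings",
            to: "/features/codebase-embeddings",
          },
          {
            from: "/walkthroughs/tab-autocomplete",
            to: "/features/tab-autocomplete",
          },
          { from: "/walkthroughs/prompt-files", to: "/features/prompt-files" },
          { from: "/walkthroughs/quick-actions", to: "/features/quick-actions" },
        ],
      },
    ],
  ],
};

module.exports = config;
```
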
@@ -42,17 +42,23 @@ const sidebars = {
     },
     {
       type: "category",
-      label: "🚶 Walkthroughs",
+      label: "🪄 Features",
       collapsible: true,
       collapsed: true,
       items: [
-        "walkthroughs/set-up-codestral",
-        "walkthroughs/codebase-embeddings",
-        "walkthroughs/tab-autocomplete",
-        "walkthroughs/prompt-files",
-        "walkthroughs/quick-actions",
+        "features/codebase-embeddings",
+        "features/tab-autocomplete",
+        "features/prompt-files",
+        "features/quick-actions",
       ],
     },
+    {
+      type: "category",
+      label: "🚶 Walkthroughs",
+      collapsible: true,
+      collapsed: true,
+      items: ["walkthroughs/set-up-codestral", "walkthroughs/llama3.1"],
+    },
     "development-data",
     "telemetry",
     "troubleshooting",

@@ -1879,7 +1879,7 @@
     },
     "embeddingsProvider": {
       "title": "Embeddings Provider",
-      "markdownDescription": "The method that will be used to generate codebase embeddings. The default is transformers.js, which will run locally in the browser. Learn about the other options [here](https://docs.continue.dev/walkthroughs/codebase-embeddings#embeddings-providers).",
+      "markdownDescription": "The method that will be used to generate codebase embeddings. The default is transformers.js, which will run locally in the browser. Learn about the other options [here](https://docs.continue.dev/features/codebase-embeddings#embeddings-providers).",
       "type": "object",
       "properties": {
         "provider": {

@@ -2060,7 +2060,7 @@
     "tabAutocompleteOptions": {
       "title": "TabAutocompleteOptions",
       "type": "object",
-      "markdownDescription": "These options let you customize your tab-autocomplete experience. Read about all options in [the docs](https://docs.continue.dev/walkthroughs/tab-autocomplete#configuration-options).",
+      "markdownDescription": "These options let you customize your tab-autocomplete experience. Read about all options in [the docs](https://docs.continue.dev/features/tab-autocomplete#configuration-options).",
       "properties": {
         "disable": {
           "type": "boolean",

@@ -1879,7 +1879,7 @@
     },
     "embeddingsProvider": {
       "title": "Embeddings Provider",
-      "markdownDescription": "The method that will be used to generate codebase embeddings. The default is transformers.js, which will run locally in the browser. Learn about the other options [here](https://docs.continue.dev/walkthroughs/codebase-embeddings#embeddings-providers).",
+      "markdownDescription": "The method that will be used to generate codebase embeddings. The default is transformers.js, which will run locally in the browser. Learn about the other options [here](https://docs.continue.dev/features/codebase-embeddings#embeddings-providers).",
       "type": "object",
       "properties": {
         "provider": {

@@ -2060,7 +2060,7 @@
     "tabAutocompleteOptions": {
       "title": "TabAutocompleteOptions",
       "type": "object",
-      "markdownDescription": "These options let you customize your tab-autocomplete experience. Read about all options in [the docs](https://docs.continue.dev/walkthroughs/tab-autocomplete#configuration-options).",
+      "markdownDescription": "These options let you customize your tab-autocomplete experience. Read about all options in [the docs](https://docs.continue.dev/features/tab-autocomplete#configuration-options).",
       "properties": {
         "disable": {
           "type": "boolean",

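The two pairs of identical hunks above update the same description in two copies of the config schema. For reference, a hedged example of how these options appear in a user's config file; the specific provider and model values are illustrative, not from the diff:

```json title="~/.continue/config.json"
{
  "embeddingsProvider": {
    "provider": "ollama",
    "model": "nomic-embed-text"
  },
  "tabAutocompleteOptions": {
    "disable": false
  }
}
```
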
@@ -82,12 +82,12 @@
     "continue.enableQuickActions": {
       "type": "boolean",
       "default": false,
-      "markdownDescription": "Enable the experimental Quick Actions feature. Read our walkthrough to learn about configuration and how to share feedback: [continue.dev › Walkthrough: Quick Actions (experimental)](https://docs.continue.dev/walkthroughs/quick-actions)"
+      "markdownDescription": "Enable the experimental Quick Actions feature. Read our walkthrough to learn about configuration and how to share feedback: [continue.dev › Walkthrough: Quick Actions (experimental)](https://docs.continue.dev/features/quick-actions)"
     },
     "continue.enableTabAutocomplete": {
       "type": "boolean",
       "default": true,
-      "markdownDescription": "Enable Continue's tab autocomplete feature. Read our walkthrough to learn about configuration and how to share feedback: [continue.dev › Walkthrough: Tab Autocomplete (beta)](https://docs.continue.dev/walkthroughs/tab-autocomplete)"
+      "markdownDescription": "Enable Continue's tab autocomplete feature. Read our walkthrough to learn about configuration and how to share feedback: [continue.dev › Walkthrough: Tab Autocomplete (beta)](https://docs.continue.dev/features/tab-autocomplete)"
     },
     "continue.pauseTabAutocompleteOnBattery": {
       "type": "boolean",

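These are plain VS Code configuration booleans contributed by the extension; a hedged example of toggling them in `settings.json` (the values shown are illustrative):

```json
{
  "continue.enableQuickActions": true,
  "continue.enableTabAutocomplete": true
}
```
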
@@ -35,7 +35,7 @@ export class ContinueCompletionProvider
         if (val === "Documentation") {
           vscode.env.openExternal(
             vscode.Uri.parse(
-              "https://docs.continue.dev/walkthroughs/tab-autocomplete",
+              "https://docs.continue.dev/features/tab-autocomplete",
             ),
           );
         } else if (val === "Download Ollama") {

@@ -42,7 +42,7 @@ export class TabAutocompleteModel {
         if (value === "Documentation") {
           vscode.env.openExternal(
             vscode.Uri.parse(
-              "https://docs.continue.dev/walkthroughs/tab-autocomplete",
+              "https://docs.continue.dev/features/tab-autocomplete",
             ),
           );
         } else if (value === "Copy Command") {

@@ -65,7 +65,7 @@ export class TabAutocompleteModel {
         if (value === "Documentation") {
           vscode.env.openExternal(
             vscode.Uri.parse(
-              "https://docs.continue.dev/walkthroughs/tab-autocomplete",
+              "https://docs.continue.dev/features/tab-autocomplete",
            ),
          );
        } else if (value === "Download Ollama") {

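The three hunks above all edit the same notification pattern. A condensed sketch, assuming a `vscode.window.showInformationMessage` call supplies the buttons (the message text here is illustrative, not from the diff):

```typescript
import * as vscode from "vscode";

async function promptForAutocompleteSetup(): Promise<void> {
  // Illustrative message; the buttons mirror those checked in the hunks.
  const val = await vscode.window.showInformationMessage(
    "Learn how to configure tab autocomplete?",
    "Documentation",
    "Download Ollama",
  );
  if (val === "Documentation") {
    vscode.env.openExternal(
      vscode.Uri.parse("https://docs.continue.dev/features/tab-autocomplete"),
    );
  }
}
```
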
@@ -109,7 +109,7 @@ const IndexingProgressBar = ({

   function getIndexingErrMsg(msg: string): string {
     if (isJetBrains() && embeddingsProvider === "all-MiniLM-L6-v2") {
-      return "The 'transformers.js' embeddingsProvider is currently unsupported in JetBrains. To enable codebase indexing, you can use any of the other providers described in the docs: https://docs.continue.dev/walkthroughs/codebase-embeddings#embeddings-providers";
+      return "The 'transformers.js' embeddingsProvider is currently unsupported in JetBrains. To enable codebase indexing, you can use any of the other providers described in the docs: https://docs.continue.dev/features/codebase-embeddings#embeddings-providers";
     }
     return msg;
   }