feat: remove `models` property

Patrick Erichsen 2025-04-08 19:28:48 -07:00
parent eae1dd20bb
commit 7f8882eee1
17 changed files with 84 additions and 73 deletions

View File

@@ -234,15 +234,17 @@ describe("Test Suite", () => {
       result: { config },
     } = await messenger.request("config/getSerializedProfileInfo", undefined);
-    expect(config!.models.some((m) => m.title === model.title)).toBe(true);
+    expect(config!.modelsByRole.chat.some((m) => m.title === model.title)).toBe(
+      true,
+    );
     await messenger.request("config/deleteModel", { title: model.title });
     const {
       result: { config: configAfterDelete },
     } = await messenger.request("config/getSerializedProfileInfo", undefined);
-    expect(configAfterDelete!.models.some((m) => m.title === model.title)).toBe(
-      false,
-    );
+    expect(
+      configAfterDelete!.modelsByRole.chat.some((m) => m.title === model.title),
+    ).toBe(false);
   });

   it("should make an LLM completion", async () => {

View File

@@ -19,7 +19,9 @@ describe.skip("Test the ConfigHandler and E2E config loading", () => {
   test("should load the default config successfully", async () => {
     const result = await testConfigHandler.loadConfig();
-    expect(result.config!.models.length).toBe(defaultConfig.models?.length);
+    expect(result.config!.modelsByRole.chat.length).toBe(
+      defaultConfig.models?.length,
+    );
   });

   test.skip("should add a system message from config.ts", async () => {

View File

@@ -11,17 +11,11 @@ import {
   IContextProvider,
   IDE,
   IdeSettings,
-  ILLM,
   ILLMLogger,
 } from "../index.js";
-import Ollama from "../llm/llms/Ollama.js";
 import { GlobalContext } from "../util/GlobalContext.js";
 import { getAllAssistantFiles } from "./loadLocalAssistants.js";
-import {
-  LOCAL_ONBOARDING_CHAT_MODEL,
-  LOCAL_ONBOARDING_PROVIDER_TITLE,
-} from "./onboarding.js";
 import ControlPlaneProfileLoader from "./profile/ControlPlaneProfileLoader.js";
 import LocalProfileLoader from "./profile/LocalProfileLoader.js";
 import PlatformProfileLoader from "./profile/PlatformProfileLoader.js";
@@ -473,27 +467,6 @@ export class ConfigHandler {
     }
   }

-  // Only used till we move to using selectedModelByRole.chat
-  async llmFromTitle(title?: string): Promise<ILLM> {
-    const { config } = await this.loadConfig();
-    const model = config?.models.find((m) => m.title === title);
-    if (!model) {
-      if (title === LOCAL_ONBOARDING_PROVIDER_TITLE) {
-        // Special case, make calls to Ollama before we have it in the config
-        const ollama = new Ollama({
-          model: LOCAL_ONBOARDING_CHAT_MODEL,
-        });
-        return ollama;
-      } else if (config?.models?.length) {
-        return config?.models[0];
-      }
-      throw new Error("No model found");
-    }
-    return model;
-  }
-
   // Ancient method of adding custom providers through vs code
   private additionalContextProviders: IContextProvider[] = [];

   registerCustomContextProvider(contextProvider: IContextProvider) {
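With `llmFromTitle` deleted (including its Ollama onboarding special case), call sites resolve a model from the loaded config instead. The replacement pattern, exactly as it appears repeatedly later in this commit:

```ts
// Before: const llm = await configHandler.llmFromTitle(title);
// After: read the selected chat model off the loaded config and guard it.
const llm = (await configHandler.loadConfig()).config?.selectedModelByRole.chat;
if (!llm) {
  throw new Error("No chat model selected");
}
```

Note the behavioral change: the old method fell back to `config.models[0]` for unknown titles, while the new pattern throws whenever no chat model is selected.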

View File

@@ -504,7 +504,6 @@ async function intermediateToFinalConfig(
   const continueConfig: ContinueConfig = {
     ...config,
     contextProviders,
-    models,
     tools: [...allTools],
     mcpServerStatuses: [],
     slashCommands: config.slashCommands ?? [],
@@ -544,7 +543,7 @@
   // Handle experimental modelRole config values for apply and edit
   const inlineEditModel = getModelByRole(continueConfig, "inlineEdit")?.title;
   if (inlineEditModel) {
-    const match = continueConfig.models.find(
+    const match = continueConfig.modelsByRole.chat.find(
       (m) => m.title === inlineEditModel,
     );
     if (match) {
@@ -563,7 +562,7 @@
     "applyCodeBlock",
   )?.title;
   if (applyBlockModel) {
-    const match = continueConfig.models.find(
+    const match = continueConfig.modelsByRole.chat.find(
       (m) => m.title === applyBlockModel,
     );
     if (match) {
@@ -617,7 +616,6 @@ async function finalToBrowserConfig(
 ): Promise<BrowserSerializedContinueConfig> {
   return {
     allowAnonymousTelemetry: final.allowAnonymousTelemetry,
-    models: final.models.map(llmToSerializedModelDescription),
     systemMessage: final.systemMessage,
     completionOptions: final.completionOptions,
     slashCommands: final.slashCommands?.map(
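With `finalToBrowserConfig` no longer emitting `models`, the webview can only receive model lists through role-keyed fields. The diff shows only the deletion; if the serializer maps roles as well, it would plausibly look like the fragment below (an assumption, not visible in these hunks):

```ts
// Hypothetical per-role serialization; this commit only shows the deletion.
modelsByRole: Object.fromEntries(
  Object.entries(final.modelsByRole).map(([role, llms]) => [
    role,
    llms.map(llmToSerializedModelDescription),
  ]),
),
```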

View File

@@ -258,7 +258,7 @@ async function injectControlPlaneProxyInfo(
     }
   });

-  config.models.forEach((model) => {
+  config.modelsByRole.chat.forEach((model) => {
     if (model.providerName === "continue-proxy") {
       (model as ContinueProxy).controlPlaneProxyInfo = info;
     }

View File

@@ -56,7 +56,7 @@ export function rectifySelectedModelsFromGlobalContext(
       role === "apply" &&
       newModel?.getConfigurationStatus() !== LLMConfigurationStatuses.VALID
     ) {
-      break;
+      continue;
     }
     configCopy.selectedModelByRole[role] = newModel;
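The `break` → `continue` swap here is a behavioral fix rather than part of the `models` removal: `break` aborted the whole rectification loop at the first invalid apply model, leaving every later role unprocessed, whereas `continue` skips only that role. A sketch of the assumed surrounding loop (the iteration and helper are illustrative; only the conditional and assignment are confirmed by the diff):

```ts
for (const role of Object.keys(configCopy.selectedModelByRole)) {
  const newModel = lookUpModelForRole(role); // hypothetical helper
  if (
    role === "apply" &&
    newModel?.getConfigurationStatus() !== LLMConfigurationStatuses.VALID
  ) {
    continue; // skip this role, keep rectifying the rest
  }
  configCopy.selectedModelByRole[role] = newModel;
}
```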

View File

@@ -94,7 +94,7 @@ export function getModelByRole<T extends keyof ExperimentalModelRoles>(
     return undefined;
   }

-  const matchingModel = config.models.find(
+  const matchingModel = config.modelsByRole.chat.find(
     (model) => model.title === roleTitle,
   );

View File

@@ -125,7 +125,6 @@ async function configYamlToContinueConfig(
   const localErrors: ConfigValidationError[] = [];
   const continueConfig: ContinueConfig = {
     slashCommands: [],
-    models: [],
     tools: [...allTools],
     mcpServerStatuses: [],
     systemMessage: undefined,
@@ -222,10 +221,6 @@ async function configYamlToContinueConfig(
         config: continueConfig,
       });

-      if (modelsArrayRoles.some((role) => model.roles?.includes(role))) {
-        continueConfig.models.push(...llms);
-      }
-
       if (model.roles?.includes("chat")) {
         continueConfig.modelsByRole.chat.push(...llms);
       }
@@ -323,7 +318,7 @@ async function configYamlToContinueConfig(
   if (allowFreeTrial) {
     // Obtain auth token (iff free trial being used)
-    const freeTrialModels = continueConfig.models.filter(
+    const freeTrialModels = continueConfig.modelsByRole.chat.filter(
       (model) => model.providerName === "free-trial",
     );
     if (freeTrialModels.length > 0) {
@@ -338,14 +333,15 @@
           message: `Failed to obtain GitHub auth token for free trial:\n${e instanceof Error ? e.message : e}`,
         });
         // Remove free trial models
-        continueConfig.models = continueConfig.models.filter(
-          (model) => model.providerName !== "free-trial",
-        );
+        continueConfig.modelsByRole.chat =
+          continueConfig.modelsByRole.chat.filter(
+            (model) => model.providerName !== "free-trial",
+          );
       }
     }
   } else {
     // Remove free trial models
-    continueConfig.models = continueConfig.models.filter(
+    continueConfig.modelsByRole.chat = continueConfig.modelsByRole.chat.filter(
       (model) => model.providerName !== "free-trial",
     );
   }
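The free-trial filter is now duplicated across the error branch and the `else` branch. A small hedged consolidation (the helper name is mine, not from the commit) would keep the two in sync:

```ts
// Hypothetical helper; not part of this commit.
function removeFreeTrialChatModels(config: ContinueConfig): void {
  config.modelsByRole.chat = config.modelsByRole.chat.filter(
    (model) => model.providerName !== "free-trial",
  );
}
```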

View File

@@ -45,7 +45,7 @@ async function getContextProviderExtras(
     config,
     embeddingsProvider: config.selectedModelByRole.embed,
     fetch: fetch,
-    llm: config.models[0],
+    llm: config.modelsByRole.chat[0],
     reranker: config.selectedModelByRole.rerank,
     selectedCode: [],
   };

View File

@@ -1,3 +1,4 @@
+/* eslint-disable max-lines-per-function */
 import { fetchwithRequestOptions } from "@continuedev/fetch";
 import * as URI from "uri-js";
 import { v4 as uuidv4 } from "uuid";
@@ -60,7 +61,13 @@ async function* streamDiffLinesGenerator(
   msg: Message<ToCoreProtocol["streamDiffLines"][0]>,
 ): AsyncGenerator<DiffLine> {
   const data = msg.data;
-  const llm = await configHandler.llmFromTitle(msg.data.modelTitle);
+  const llm = (await configHandler.loadConfig()).config?.selectedModelByRole
+    .chat;
+  if (!llm) {
+    throw new Error("No chat model selected");
+  }
+
   for await (const diffLine of streamDiffLines(
     data.prefix,
     data.highlighted,
@@ -430,7 +437,13 @@
     );

     on("llm/complete", async (msg) => {
-      const model = await this.configHandler.llmFromTitle(msg.data.title);
+      const model = (await this.configHandler.loadConfig()).config
+        ?.selectedModelByRole.chat;
+      if (!model) {
+        throw new Error("No chat model selected");
+      }
+
       const completion = await model.complete(
         msg.data.prompt,
         new AbortController().signal,
@@ -449,9 +462,13 @@
     });

     on("chatDescriber/describe", async (msg) => {
-      const currentModel = await this.configHandler.llmFromTitle(
-        msg.data.selectedModelTitle,
-      );
+      const currentModel = (await this.configHandler.loadConfig()).config
+        ?.selectedModelByRole.chat;
+      if (!currentModel) {
+        throw new Error("No chat model selected");
+      }
+
       return await ChatDescriber.describe(currentModel, {}, msg.data.text);
     });
@@ -681,7 +698,12 @@
         throw new Error(`Tool ${toolCall.function.name} not found`);
       }

-      const llm = await this.configHandler.llmFromTitle(selectedModelTitle);
+      const llm = (await this.configHandler.loadConfig()).config
+        ?.selectedModelByRole.chat;
+      if (!llm) {
+        throw new Error("No chat model selected");
+      }
+
       const contextItems = await callTool(
         tool,
@@ -711,7 +733,12 @@
         return false;
       }

-      const llm = await this.configHandler.llmFromTitle(selectedModelTitle);
+      const llm = (await this.configHandler.loadConfig()).config
+        ?.selectedModelByRole.chat;
+      if (!llm) {
+        throw new Error("No chat model selected");
+      }
+
       // Count the size of the file tokenwise
       const tokens = countTokens(item.content);
@@ -789,8 +816,13 @@
       }

       const model =
-        config.models.find((model) => model.title === msg.data.title) ??
-        config.models.find((model) => model.title?.startsWith(msg.data.title));
+        config.modelsByRole.chat.find(
+          (model) => model.title === msg.data.title,
+        ) ??
+        config.modelsByRole.chat.find((model) =>
+          model.title?.startsWith(msg.data.title),
+        );
       try {
         if (model) {
           return await model.listModels();
@@ -854,10 +886,15 @@
         return [];
       }

-      const { name, query, fullInput, selectedCode, selectedModelTitle } =
-        msg.data;
+      const { name, query, fullInput, selectedCode } = msg.data;
+      const llm = (await this.configHandler.loadConfig()).config
+        ?.selectedModelByRole.chat;
+      if (!llm) {
+        throw new Error("No chat model selected");
+      }

-      const llm = await this.configHandler.llmFromTitle(selectedModelTitle);
       const provider =
         config.contextProviders?.find(
           (provider) => provider.description.title === name,
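The same resolve-and-guard block now appears in each of the handlers above. A hedged consolidation sketch using the codebase's `ConfigHandler` and `ILLM` types (the helper itself is mine, not from the commit, which inlines the logic at every call site):

```ts
// Hypothetical helper; each handler above inlines this logic instead.
async function getSelectedChatModel(
  configHandler: ConfigHandler,
): Promise<ILLM> {
  const { config } = await configHandler.loadConfig();
  const llm = config?.selectedModelByRole.chat;
  if (!llm) {
    throw new Error("No chat model selected");
  }
  return llm;
}
```

Also worth noting: the `context/getContextItems` hunk drops `selectedModelTitle` from the destructured message data, so this handler no longer reads a model title from the payload at all.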

core/index.d.ts
View File

@@ -1393,7 +1393,6 @@ export interface Config {
 // in the actual Continue source code
 export interface ContinueConfig {
   allowAnonymousTelemetry?: boolean;
-  models: ILLM[];
   systemMessage?: string;
   completionOptions?: BaseCompletionOptions;
   requestOptions?: RequestOptions;
@@ -1417,7 +1416,6 @@ export interface ContinueConfig {
 export interface BrowserSerializedContinueConfig {
   allowAnonymousTelemetry?: boolean;
-  models: ModelDescription[];
   systemMessage?: string;
   completionOptions?: BaseCompletionOptions;
   requestOptions?: RequestOptions;
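After these removals, neither `ContinueConfig` nor `BrowserSerializedContinueConfig` carries a flat `models` array; consumers go through the role-keyed fields instead. Their declarations sit outside these hunks, so the following is an assumed sketch, not the actual typings:

```ts
// Assumed shape of the surviving role-keyed fields; not shown in this diff.
export interface ContinueConfig {
  allowAnonymousTelemetry?: boolean;
  modelsByRole: Record<ModelRole, ILLM[]>;
  selectedModelByRole: Record<ModelRole, ILLM | undefined>;
  // ...remaining fields unchanged
}
```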

View File

@@ -23,19 +23,22 @@ export async function* llmStreamChat(
     void TTS.kill();
   }

-  const { title, legacySlashCommandData, completionOptions, messages } =
-    msg.data;
+  const { legacySlashCommandData, completionOptions, messages } = msg.data;

-  const model = await configHandler.llmFromTitle(title);
+  const model = config.selectedModelByRole.chat;
+  if (!model) {
+    throw new Error("No chat model selected");
+  }

   // Log to return in case of error
   const errorPromptLog = {
-    modelTitle: model.title ?? model.model,
+    modelTitle: model?.title ?? model?.model,
     completion: "",
     prompt: "",
     completionOptions: {
       ...msg.data.completionOptions,
-      model: model.model,
+      model: model?.model,
     },
   };

View File

@@ -37,7 +37,7 @@ export function OnboardingCard({ isDialog }: OnboardingCardProps) {
   return (
     <ReusableCard
-      showCloseButton={!isDialog && !!config.models.length}
+      showCloseButton={!isDialog && !!config.modelsByRole.chat.length}
       onClose={() => onboardingCard.close()}
       testId="onboarding-card"
     >

View File

@@ -28,7 +28,7 @@ export function PlatformOnboardingCard({ isDialog }: OnboardingCardProps) {
   return (
     <ReusableCard
-      showCloseButton={!isDialog && !!config.models.length}
+      showCloseButton={!isDialog && !!config.modelsByRole.chat.length}
       onClose={() => onboardingCard.close()}
     >
       <div className="flex h-full w-full items-center justify-center">

View File

@@ -121,7 +121,9 @@ function ModelOption({
 function ModelSelect() {
   const dispatch = useAppDispatch();
   const selectedChatModel = useAppSelector(selectSelectedChatModel);
-  const allModels = useAppSelector((state) => state.config.config.models);
+  const allModels = useAppSelector(
+    (state) => state.config.config.modelsByRole.chat,
+  );
   const buttonRef = useRef<HTMLButtonElement>(null);
   const [options, setOptions] = useState<Option[]>([]);
   const [sortedOptions, setSortedOptions] = useState<Option[]>([]);

View File

@@ -50,6 +50,7 @@ function useSetup() {
     organizations,
     selectedOrgId,
   } = result;
+  debugger;
   if (isInitial && hasDoneInitialConfigLoad.current) {
     return;
   }

View File

@@ -20,7 +20,6 @@ const EMPTY_CONFIG: BrowserSerializedContinueConfig = {
     },
   ],
   contextProviders: [],
-  models: [],
   tools: [],
   mcpServerStatuses: [],
   usePlatform: true,