diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx
index f79e0e8..9fd4677 100644
--- a/app/components/model-config.tsx
+++ b/app/components/model-config.tsx
@@ -48,6 +48,25 @@ export function ModelConfigList(props: {
}}
>
+
+ {
+ props.updateConfig(
+ (config) =>
+ (config.top_p = ModalConfigValidator.top_p(
+ e.currentTarget.valueAsNumber,
+ )),
+ );
+ }}
+ >
+
+
`With ${x} contextual prompts`,
- Edit: "Contextual and Memory Prompts",
+ Edit: "Current Chat Settings",
Add: "Add a Prompt",
Clear: "Context Cleared",
Revert: "Revert",
diff --git a/app/store/config.ts b/app/store/config.ts
index 945e1be..68e2991 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -33,6 +33,7 @@ export const DEFAULT_CONFIG = {
modelConfig: {
model: "gpt-3.5-turbo" as ModelType,
temperature: 0.5,
+ top_p: 1,
max_tokens: 2000,
presence_penalty: 0,
frequency_penalty: 0,
@@ -158,6 +159,9 @@ export const ModalConfigValidator = {
temperature(x: number) {
return limitNumber(x, 0, 1, 1);
},
+ top_p(x: number) {
+ return limitNumber(x, 0, 1, 1);
+ },
};
export const useAppConfig = create()(
@@ -177,15 +181,16 @@ export const useAppConfig = create()(
}),
{
name: StoreKey.Config,
- version: 3.2,
+ version: 3.3,
migrate(persistedState, version) {
- if (version === 3.2) return persistedState as any;
+ if (version === 3.3) return persistedState as any;
const state = persistedState as ChatConfig;
state.modelConfig.sendMemory = true;
state.modelConfig.historyMessageCount = 4;
state.modelConfig.compressMessageLengthThreshold = 1000;
state.modelConfig.frequency_penalty = 0;
+ state.modelConfig.top_p = 1;
state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
state.dontShowMaskSplashScreen = false;