diff --git a/app/requests.ts b/app/requests.ts
index 3cb838e6..9159f1cf 100644
--- a/app/requests.ts
+++ b/app/requests.ts
@@ -1,5 +1,11 @@
 import type { ChatRequest, ChatResponse } from "./api/openai/typing";
-import { Message, ModelConfig, useAccessStore, useChatStore } from "./store";
+import {
+  Message,
+  ModelConfig,
+  ModelType,
+  useAccessStore,
+  useChatStore,
+} from "./store";
 import { showToast } from "./components/ui-lib";
 
 const TIME_OUT_MS = 60000;
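ModelType now rides along with the other store exports. Its real definition
lives in ./store and is not part of this patch; a hypothetical sketch of the
constraint it adds, for illustration only:

// Hypothetical shape; the real ModelType is derived from the model list in
// ./store. It narrows options.model to known model names.
type ModelType = "gpt-3.5-turbo" | "gpt-4";

const ok: ModelType = "gpt-3.5-turbo";
// const bad: ModelType = "unknown-model"; // rejected at compile time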
@@ -9,6 +15,7 @@ const makeRequestParam = (
   options?: {
     filterBot?: boolean;
     stream?: boolean;
+    model?: ModelType; // optional per-request override of the configured model
   },
 ): ChatRequest => {
   let sendMessages = messages.map((v) => ({
@@ -26,6 +33,11 @@ const makeRequestParam = (
   // @ts-expect-error
   delete modelConfig.max_tokens;
 
+  // override the configured model when the caller supplies one
+  if (options?.model) {
+    modelConfig.model = options.model;
+  }
+
   return {
     messages: sendMessages,
     stream: options?.stream,
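The override gives options.model precedence over the session's
modelConfig.model, so the same messages can be serialized against different
models. A minimal sketch of the two call shapes; makeRequestParam is
module-private, so this only makes sense inside app/requests.ts, and the
messages value is assumed to be in scope:

// With no override, the session's configured model is kept.
const sessionReq = makeRequestParam(messages, { filterBot: true });

// An explicit model wins over modelConfig.model for this request only.
const pinnedReq = makeRequestParam(messages, {
  filterBot: true,
  model: "gpt-3.5-turbo",
});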
@@ -50,7 +62,7 @@ function getHeaders() {
 
 export function requestOpenaiClient(path: string) {
   return (body: any, method = "POST") =>
-    fetch("/api/openai?_vercel_no_cache=1", {
+    fetch("/api/openai", {
       method,
       headers: {
         "Content-Type": "application/json",
@@ -61,8 +73,16 @@ export function requestOpenaiClient(path: string) {
     });
 }
 
-export async function requestChat(messages: Message[]) {
-  const req: ChatRequest = makeRequestParam(messages, { filterBot: true });
+export async function requestChat(
+  messages: Message[],
+  options?: {
+    model?: ModelType;
+  },
+) {
+  const req: ChatRequest = makeRequestParam(messages, {
+    filterBot: true,
+    model: options?.model,
+  });
 
   const res = await requestOpenaiClient("v1/chat/completions")(req);
 
@@ -204,7 +224,13 @@ export async function requestChatStream(
   }
 }
 
-export async function requestWithPrompt(messages: Message[], prompt: string) {
+export async function requestWithPrompt(
+  messages: Message[],
+  prompt: string,
+  options?: {
+    model?: ModelType;
+  },
+) {
   messages = messages.concat([
     {
       role: "user",
@@ -213,7 +239,7 @@ export async function requestWithPrompt(messages: Message[], prompt: string) {
     },
   ]);
 
-  const res = await requestChat(messages);
+  const res = await requestChat(messages, options);
 
   return res?.choices?.at(0)?.message?.content ?? "";
 }
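A usage sketch of the widened requestWithPrompt signature from outside the
module; the helper name and prompt text are illustrative, while the imports
are real exports of these files:

import { requestWithPrompt } from "./requests";
import type { Message } from "./store";

// Pin the cheap model for housekeeping requests; omitting the third argument
// keeps the old behavior of using the session's configured model.
async function nameConversation(messages: Message[]): Promise<string> {
  return requestWithPrompt(messages, "Name this conversation in five words.", {
    model: "gpt-3.5-turbo",
  });
}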
diff --git a/app/store/app.ts b/app/store/app.ts
index 0a3ff865..fe2a07da 100644
--- a/app/store/app.ts
+++ b/app/store/app.ts
@@ -535,14 +535,14 @@ export const useChatStore = create<ChatStore>()(
           session.topic === DEFAULT_TOPIC &&
           countMessages(session.messages) >= SUMMARIZE_MIN_LEN
         ) {
-          requestWithPrompt(session.messages, Locale.Store.Prompt.Topic).then(
-            (res) => {
-              get().updateCurrentSession(
-                (session) =>
-                  (session.topic = res ? trimTopic(res) : DEFAULT_TOPIC),
-              );
-            },
-          );
+          requestWithPrompt(session.messages, Locale.Store.Prompt.Topic, {
+            model: "gpt-3.5-turbo", // topic naming always uses the lightweight model
+          }).then((res) => {
+            get().updateCurrentSession(
+              (session) =>
+                (session.topic = res ? trimTopic(res) : DEFAULT_TOPIC),
+            );
+          });
         }
 
         const config = get().config;
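Hardcoding the model here trades configurability for cost: topic naming always
hits the lightweight model regardless of the session's own setting, and an
empty response still falls back to DEFAULT_TOPIC. A sketch of that fallback in
isolation, with trimTopic and DEFAULT_TOPIC as in the patch and illustrative
values:

// Empty responses keep the default title; anything else is trimmed.
const topicFrom = (res: string) => (res ? trimTopic(res) : DEFAULT_TOPIC);

topicFrom("  Model Override Patch  "); // -> trimmed topic
topicFrom(""); // -> DEFAULT_TOPIC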