forked from XiaoMo/ChatGPT-Next-Web
fix: fix gemini issue when using app (#4013)
* chore: update path
* fix: fix google auth logic
* fix: not using header authorization for google api
* chore: revert to allow stream
This commit is contained in:
parent
9d5801fb5f
commit
bca74241e6
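The heart of the fix is in the client's getHeaders() helper (first two hunks below): Gemini models are now detected by prefix instead of the exact "gemini-pro" id, and when a Gemini model is used from the desktop (Tauri) app no auth header is attached at all, because the key travels as a ?key= query parameter on a proxied URL instead. A condensed, self-contained sketch of the resulting decision follows; names mirror the diff, the ACCESS_CODE_PREFIX value is assumed, and the access-control check is simplified — treat it as a reading aid, not the repo's exact function.

// Sketch of the new auth decision (assumptions noted above).
const ACCESS_CODE_PREFIX = "nk-"; // repo constant; value assumed here

function resolveAuthHeader(opts: {
  model: string;      // e.g. "gemini-pro", "gpt-4"
  isApp: boolean;     // true in the Tauri desktop build
  isAzure: boolean;
  apiKey: string;     // user-supplied key ("" if unset)
  accessCode: string; // server access code ("" if unset)
}): Record<string, string> {
  const isGoogle = opts.model.startsWith("gemini"); // was: === "gemini-pro"
  // Gemini + desktop app: the key is sent as a ?key= query parameter on the
  // proxied URL (see the google.ts hunks below), so no auth header is set.
  if (isGoogle && opts.isApp) return {};

  const name = opts.isAzure ? "api-key" : "Authorization";
  const bearer = (s: string) => `${opts.isAzure ? "" : "Bearer "}${s.trim()}`;
  if (opts.apiKey.length > 0) return { [name]: bearer(opts.apiKey) };
  if (opts.accessCode.length > 0) {
    // the real code also requires accessStore.enabledAccessControl()
    return { [name]: bearer(ACCESS_CODE_PREFIX + opts.accessCode) };
  }
  return {};
}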
@@ -144,10 +144,10 @@ export function getHeaders() {
   const headers: Record<string, string> = {
     "Content-Type": "application/json",
     "x-requested-with": "XMLHttpRequest",
-    "Accept": "application/json",
+    Accept: "application/json",
   };
   const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
-  const isGoogle = modelConfig.model === "gemini-pro";
+  const isGoogle = modelConfig.model.startsWith("gemini");
   const isAzure = accessStore.provider === ServiceProvider.Azure;
   const authHeader = isAzure ? "api-key" : "Authorization";
   const apiKey = isGoogle
@@ -155,20 +155,23 @@ export function getHeaders() {
     : isAzure
      ? accessStore.azureApiKey
      : accessStore.openaiApiKey;

   const clientConfig = getClientConfig();
   const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
   const validString = (x: string) => x && x.length > 0;

-  // use user's api key first
-  if (validString(apiKey)) {
-    headers[authHeader] = makeBearer(apiKey);
-  } else if (
-    accessStore.enabledAccessControl() &&
-    validString(accessStore.accessCode)
-  ) {
-    headers[authHeader] = makeBearer(
-      ACCESS_CODE_PREFIX + accessStore.accessCode,
-    );
+  // when using google api in app, not set auth header
+  if (!(isGoogle && clientConfig?.isApp)) {
+    // use user's api key first
+    if (validString(apiKey)) {
+      headers[authHeader] = makeBearer(apiKey);
+    } else if (
+      accessStore.enabledAccessControl() &&
+      validString(accessStore.accessCode)
+    ) {
+      headers[authHeader] = makeBearer(
+        ACCESS_CODE_PREFIX + accessStore.accessCode,
+      );
+    }
   }

   return headers;
@@ -1,15 +1,8 @@
 import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
 import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import Locale from "../../locales";
-import { getServerSideConfig } from "@/app/config/server";
-import de from "@/app/locales/de";
+import { DEFAULT_API_HOST } from "@/app/constant";
 export class GeminiProApi implements LLMApi {
   extractMessage(res: any) {
     console.log("[Response] gemini-pro response: ", res);
@@ -21,7 +14,7 @@ export class GeminiProApi implements LLMApi {
     );
   }
   async chat(options: ChatOptions): Promise<void> {
-    const apiClient = this;
+    // const apiClient = this;
     const messages = options.messages.map((v) => ({
       role: v.role.replace("assistant", "model").replace("system", "user"),
       parts: [{ text: v.content }],
@@ -79,20 +72,31 @@ export class GeminiProApi implements LLMApi {
       ],
     };

     console.log("[Request] google payload: ", requestPayload);
+    const isApp = !!getClientConfig()?.isApp;

     const shouldStream = !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);
+    const accessStore = useAccessStore.getState();
     try {
-      const chatPath = this.path(Google.ChatPath);
+      let chatPath = this.path(Google.ChatPath);
+
+      // let baseUrl = accessStore.googleUrl;
+
+      chatPath = isApp
+        ? DEFAULT_API_HOST +
+          "/api/proxy/google/" +
+          Google.ChatPath +
+          `?key=${accessStore.googleApiKey}`
+        : chatPath;
+
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
         signal: controller.signal,
         headers: getHeaders(),
       };

+      console.log("[Request] google chatPath: ", chatPath, isApp);
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
@@ -134,6 +138,8 @@ export class GeminiProApi implements LLMApi {

         // start animaion
         animateResponseText();
+
+        console.log("[Proxy Endpoint] ", streamChatPath);
         fetch(streamChatPath, chatPayload)
           .then((response) => {
             const reader = response?.body?.getReader();
@@ -187,9 +193,7 @@ export class GeminiProApi implements LLMApi {
       } else {
         const res = await fetch(chatPath, chatPayload);
         clearTimeout(requestTimeoutId);
-
         const resJson = await res.json();
-
         if (resJson?.promptFeedback?.blockReason) {
           // being blocked
           options.onError?.(
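In effect, the desktop app now routes Gemini traffic through the hosted proxy instead of its own relative API route: assuming Google.ChatPath is the v1beta generateContent route for gemini-pro, an app-side request goes to something like https://api.nextchat.dev/api/proxy/google/v1beta/models/gemini-pro:generateContent?key=<googleApiKey>, while the web build keeps calling this.path(Google.ChatPath) with the headers from getHeaders(). The streaming branch reuses the same chatPayload against streamChatPath.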
@@ -1,3 +1,4 @@
+"use client";
 import {
   ApiPath,
   DEFAULT_API_HOST,
@@ -45,7 +46,9 @@ export class ChatGPTApi implements LLMApi {

     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
-      baseUrl = isApp ? DEFAULT_API_HOST : ApiPath.OpenAI;
+      baseUrl = isApp
+        ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI
+        : ApiPath.OpenAI;
     }

     if (baseUrl.endsWith("/")) {
@@ -59,6 +62,8 @@ export class ChatGPTApi implements LLMApi {
       path = makeAzurePath(path, accessStore.azureApiVersion);
     }

+    console.log("[Proxy Endpoint] ", baseUrl, path);
+
     return [baseUrl, path].join("/");
   }

@@ -307,7 +307,7 @@ export function PreviewActions(props: {
     setShouldExport(false);

     var api: ClientApi;
-    if (config.modelConfig.model === "gemini-pro") {
+    if (config.modelConfig.model.startsWith("gemini")) {
       api = new ClientApi(ModelProvider.GeminiPro);
     } else {
       api = new ClientApi(ModelProvider.GPT);
@@ -171,7 +171,7 @@ export function useLoadData() {
   const config = useAppConfig();

   var api: ClientApi;
-  if (config.modelConfig.model === "gemini-pro") {
+  if (config.modelConfig.model.startsWith("gemini")) {
     api = new ClientApi(ModelProvider.GeminiPro);
   } else {
     api = new ClientApi(ModelProvider.GPT);
@@ -92,7 +92,7 @@ export function ModelConfigList(props: {
         ></input>
       </ListItem>

-      {props.modelConfig.model === "gemini-pro" ? null : (
+      {props.modelConfig.model.startsWith("gemini") ? null : (
         <>
           <ListItem
             title={Locale.Settings.PresencePenalty.Title}
@@ -8,8 +8,10 @@ export const FETCH_COMMIT_URL = `https://api.github.com/repos/${OWNER}/${REPO}/c
 export const FETCH_TAG_URL = `https://api.github.com/repos/${OWNER}/${REPO}/tags?per_page=1`;
 export const RUNTIME_CONFIG_DOM = "danger-runtime-config";

-export const DEFAULT_CORS_HOST = "https://a.nextweb.fun";
-export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
+// export const DEFAULT_CORS_HOST = "https://api.nextchat.dev";
+// export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
+
+export const DEFAULT_API_HOST = "https://api.nextchat.dev";
 export const OPENAI_BASE_URL = "https://api.openai.com";

 export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";
@@ -111,7 +113,7 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gpt-4-1106-preview": "2023-04",
   "gpt-4-0125-preview": "2023-04",
   "gpt-4-vision-preview": "2023-04",
   // After improvements,
   // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
   "gemini-pro": "2023-12",
 };
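Two related constant changes above: the default hosted endpoint moves from the a.nextweb.fun CORS proxy to https://api.nextchat.dev, and DEFAULT_API_HOST no longer bakes in an /api/proxy suffix, so each caller (the access store, the OpenAI and Google clients, and the cors helper) now appends its own provider-specific proxy path, as the following hunks show.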
@@ -12,7 +12,9 @@ import { ensure } from "../utils/clone";
 let fetchState = 0; // 0 not fetch, 1 fetching, 2 done

 const DEFAULT_OPENAI_URL =
-  getClientConfig()?.buildMode === "export" ? DEFAULT_API_HOST : ApiPath.OpenAI;
+  getClientConfig()?.buildMode === "export"
+    ? DEFAULT_API_HOST + "/api/proxy/openai"
+    : ApiPath.OpenAI;

 const DEFAULT_ACCESS_STATE = {
   accessCode: "",
@@ -316,7 +316,7 @@ export const useChatStore = createPersistStore(
       });

       var api: ClientApi;
-      if (modelConfig.model === "gemini-pro") {
+      if (modelConfig.model.startsWith("gemini")) {
         api = new ClientApi(ModelProvider.GeminiPro);
       } else {
         api = new ClientApi(ModelProvider.GPT);
@@ -501,7 +501,7 @@ export const useChatStore = createPersistStore(
       const modelConfig = session.mask.modelConfig;

       var api: ClientApi;
-      if (modelConfig.model === "gemini-pro") {
+      if (modelConfig.model.startsWith("gemini")) {
         api = new ClientApi(ModelProvider.GeminiPro);
       } else {
         api = new ClientApi(ModelProvider.GPT);
@@ -1,8 +1,8 @@
 import { getClientConfig } from "../config/client";
-import { ApiPath, DEFAULT_CORS_HOST } from "../constant";
+import { ApiPath, DEFAULT_API_HOST } from "../constant";

 export function corsPath(path: string) {
-  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_CORS_HOST}` : "";
+  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_API_HOST}` : "";

   if (!path.startsWith("/")) {
     path = "/" + path;
@@ -64,8 +64,17 @@ if (mode !== "export") {

   nextConfig.rewrites = async () => {
     const ret = [
+      // adjust for previous verison directly using "/api/proxy/" as proxy base route
       {
-        source: "/api/proxy/:path*",
+        source: "/api/proxy/v1/:path*",
         destination: "https://api.openai.com/v1/:path*",
       },
+      {
+        source: "/api/proxy/google/:path*",
+        destination: "https://generativelanguage.googleapis.com/:path*",
+      },
+      {
+        source: "/api/proxy/openai/:path*",
+        destination: "https://api.openai.com/:path*",
+      },
       {
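With these rewrites in place, a non-export deployment forwards /api/proxy/google/:path* to https://generativelanguage.googleapis.com/:path* and /api/proxy/openai/:path* to https://api.openai.com/:path*, which is what the new app-side proxy URLs above are built against; the old catch-all /api/proxy/:path* rule is narrowed to /api/proxy/v1/:path* so deployments that proxied the OpenAI v1 API directly keep working.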
@@ -1,5 +1,5 @@
 {
-  "name": "chatgpt-next-web",
+  "name": "nextchat",
   "private": false,
   "license": "mit",
   "scripts": {
@@ -64,4 +64,4 @@
   "resolutions": {
     "lint-staged/yaml": "^2.2.2"
   }
 }
@@ -54,7 +54,7 @@ if ! command -v node >/dev/null || ! command -v git >/dev/null || ! command -v y
 fi

 # Clone the repository and install dependencies
-git clone https://github.com/Yidadaa/ChatGPT-Next-Web
+git clone https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web
 cd ChatGPT-Next-Web
 yarn install

src-tauri/Cargo.lock (generated, 22 changes)
@@ -431,17 +431,6 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

-[[package]]
-name = "chatgpt-next-web"
-version = "0.1.0"
-dependencies = [
- "serde",
- "serde_json",
- "tauri",
- "tauri-build",
- "tauri-plugin-window-state",
-]
-
 [[package]]
 name = "chrono"
 version = "0.4.24"
@@ -1824,6 +1813,17 @@ version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"

+[[package]]
+name = "nextchat"
+version = "0.1.0"
+dependencies = [
+ "serde",
+ "serde_json",
+ "tauri",
+ "tauri-build",
+ "tauri-plugin-window-state",
+]
+
 [[package]]
 name = "nix"
 version = "0.26.4"
@@ -1,11 +1,11 @@
 [package]
-name = "chatgpt-next-web"
+name = "nextchat"
 version = "0.1.0"
 description = "A cross platform app for LLM ChatBot."
 authors = ["Yidadaa"]
 license = "mit"
 repository = ""
-default-run = "chatgpt-next-web"
+default-run = "nextchat"
 edition = "2021"
 rust-version = "1.60"

@@ -17,11 +17,29 @@ tauri-build = { version = "1.3.0", features = [] }
 [dependencies]
 serde_json = "1.0"
 serde = { version = "1.0", features = ["derive"] }
-tauri = { version = "1.3.0", features = ["notification-all", "fs-all", "clipboard-all", "dialog-all", "shell-open", "updater", "window-close", "window-hide", "window-maximize", "window-minimize", "window-set-icon", "window-set-ignore-cursor-events", "window-set-resizable", "window-show", "window-start-dragging", "window-unmaximize", "window-unminimize"] }
+tauri = { version = "1.3.0", features = [
+  "notification-all",
+  "fs-all",
+  "clipboard-all",
+  "dialog-all",
+  "shell-open",
+  "updater",
+  "window-close",
+  "window-hide",
+  "window-maximize",
+  "window-minimize",
+  "window-set-icon",
+  "window-set-ignore-cursor-events",
+  "window-set-resizable",
+  "window-show",
+  "window-start-dragging",
+  "window-unmaximize",
+  "window-unminimize",
+] }
 tauri-plugin-window-state = { git = "https://github.com/tauri-apps/plugins-workspace", branch = "v1" }

 [features]
 # this feature is used for production builds or when `devPath` points to the filesystem and the built-in dev server is disabled.
 # If you use cargo directly instead of tauri's cli you can use this feature flag to switch between tauri's `dev` and `build` modes.
 # DO NOT REMOVE!!
-custom-protocol = [ "tauri/custom-protocol" ]
+custom-protocol = ["tauri/custom-protocol"]
@@ -91,7 +91,7 @@
     "updater": {
       "active": true,
       "endpoints": [
-        "https://github.com/Yidadaa/ChatGPT-Next-Web/releases/latest/download/latest.json"
+        "https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/releases/latest/download/latest.json"
       ],
       "dialog": false,
       "windows": {