diff --git a/.env.template b/.env.template
index 3e329036..89bab2cb 100644
--- a/.env.template
+++ b/.env.template
@@ -8,6 +8,16 @@ CODE=your-password
# You can start service behind a proxy
PROXY_URL=http://localhost:7890
+# (optional)
+# Default: Empty
+# Google Gemini Pro API key, set if you want to use Google Gemini Pro API.
+GOOGLE_API_KEY=
+
+# (optional)
+# Default: https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent
+# Google Gemini Pro API url, set if you want to customize Google Gemini Pro API url.
+GOOGLE_URL=
+
# Override openai api request base url. (optional)
# Default: https://api.openai.com
# Examples: http://your-openai-proxy.com
@@ -36,3 +46,4 @@ ENABLE_BALANCE_QUERY=
# Default: Empty
# If you want to disable parse settings from url, set this value to 1.
DISABLE_FAST_LINK=
+
diff --git a/Dockerfile b/Dockerfile
index 720a0cfe..436d39d8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,6 +16,7 @@ FROM base AS builder
RUN apk update && apk add --no-cache git
ENV OPENAI_API_KEY=""
+ENV GOOGLE_API_KEY=""
ENV CODE=""
WORKDIR /app
@@ -31,6 +32,7 @@ RUN apk add proxychains-ng
ENV PROXY_URL=""
ENV OPENAI_API_KEY=""
+ENV GOOGLE_API_KEY=""
ENV CODE=""
COPY --from=builder /app/public ./public
@@ -41,22 +43,22 @@ COPY --from=builder /app/.next/server ./.next/server
EXPOSE 3000
CMD if [ -n "$PROXY_URL" ]; then \
- export HOSTNAME="127.0.0.1"; \
- protocol=$(echo $PROXY_URL | cut -d: -f1); \
- host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \
- port=$(echo $PROXY_URL | cut -d: -f3); \
- conf=/etc/proxychains.conf; \
- echo "strict_chain" > $conf; \
- echo "proxy_dns" >> $conf; \
- echo "remote_dns_subnet 224" >> $conf; \
- echo "tcp_read_time_out 15000" >> $conf; \
- echo "tcp_connect_time_out 8000" >> $conf; \
- echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \
- echo "localnet ::1/128" >> $conf; \
- echo "[ProxyList]" >> $conf; \
- echo "$protocol $host $port" >> $conf; \
- cat /etc/proxychains.conf; \
- proxychains -f $conf node server.js; \
+ export HOSTNAME="127.0.0.1"; \
+ protocol=$(echo $PROXY_URL | cut -d: -f1); \
+ host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \
+ port=$(echo $PROXY_URL | cut -d: -f3); \
+ conf=/etc/proxychains.conf; \
+ echo "strict_chain" > $conf; \
+ echo "proxy_dns" >> $conf; \
+ echo "remote_dns_subnet 224" >> $conf; \
+ echo "tcp_read_time_out 15000" >> $conf; \
+ echo "tcp_connect_time_out 8000" >> $conf; \
+ echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \
+ echo "localnet ::1/128" >> $conf; \
+ echo "[ProxyList]" >> $conf; \
+ echo "$protocol $host $port" >> $conf; \
+ cat /etc/proxychains.conf; \
+ proxychains -f $conf node server.js; \
else \
- node server.js; \
+ node server.js; \
fi
diff --git a/README.md b/README.md
index 874b169a..55061759 100644
--- a/README.md
+++ b/README.md
@@ -5,9 +5,9 @@
English / [简体中文](./README_CN.md)
-One-Click to get a well-designed cross-platform ChatGPT web UI.
+One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support.
-一键免费部署你的跨平台私人 ChatGPT 应用。
+一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。
[![Web][Web-image]][web-url]
[![Windows][Windows-image]][download-url]
@@ -25,7 +25,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI.
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
-[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
+[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA)
@@ -191,6 +191,14 @@ Azure Api Key.
Azure Api Version, find it at [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions).
+### `GOOGLE_API_KEY` (optional)
+
+Google Gemini Pro Api Key.
+
+### `GOOGLE_URL` (optional)
+
+Google Gemini Pro Api Url.
+
### `HIDE_USER_API_KEY` (optional)
> Default: Empty
diff --git a/README_CN.md b/README_CN.md
index b7a95b11..0f390a51 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -3,7 +3,7 @@
NextChat
-一键免费部署你的私人 ChatGPT 网页应用。
+一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。
[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) / [QQ 群](https://user-images.githubusercontent.com/16968934/228190818-7dd00845-e9b9-4363-97e5-44c507ac76da.jpeg) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) / [Donate](#捐赠-donate-usdt)
@@ -21,7 +21,7 @@
1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. 点击右侧按钮开始部署:
- [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
+ [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
3. 部署完毕后,即可开始使用;
4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。
@@ -106,6 +106,14 @@ Azure 密钥。
Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。
+### `GOOGLE_API_KEY` (optional)
+
+Google Gemini Pro 密钥.
+
+### `GOOGLE_URL` (optional)
+
+Google Gemini Pro Api Url.
+
### `HIDE_USER_API_KEY` (可选)
如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。
@@ -207,6 +215,7 @@ bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/s
[见项目贡献者列表](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)
### 相关项目
+
- [one-api](https://github.com/songquanpeng/one-api): 一站式大模型额度管理平台,支持市面上所有主流大语言模型
## 开源协议
diff --git a/app/api/common.ts b/app/api/common.ts
index 6b0d619d..13cfab03 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -1,6 +1,6 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
-import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant";
+import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant";
import { collectModelTable } from "../utils/model";
import { makeAzurePath } from "../azure";
diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts
new file mode 100644
index 00000000..21755678
--- /dev/null
+++ b/app/api/google/[...path]/route.ts
@@ -0,0 +1,114 @@
+import { NextRequest, NextResponse } from "next/server";
+import { auth } from "../../auth";
+import { getServerSideConfig } from "@/app/config/server";
+import { GEMINI_BASE_URL, Google } from "@/app/constant";
+
+async function handle(
+ req: NextRequest,
+ { params }: { params: { path: string[] } },
+) {
+ console.log("[Google Route] params ", params);
+
+ if (req.method === "OPTIONS") {
+ return NextResponse.json({ body: "OK" }, { status: 200 });
+ }
+
+ const controller = new AbortController();
+
+ const serverConfig = getServerSideConfig();
+
+ let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL;
+
+ if (!baseUrl.startsWith("http")) {
+ baseUrl = `https://${baseUrl}`;
+ }
+
+ if (baseUrl.endsWith("/")) {
+ baseUrl = baseUrl.slice(0, -1);
+ }
+
+ let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", "");
+
+ console.log("[Proxy] ", path);
+ console.log("[Base Url]", baseUrl);
+
+ const timeoutId = setTimeout(
+ () => {
+ controller.abort();
+ },
+ 10 * 60 * 1000,
+ );
+
+ const bearToken = req.headers.get("Authorization") ?? "";
+ const token = bearToken.trim().replaceAll("Bearer ", "").trim();
+
+ const key = token ? token : serverConfig.googleApiKey;
+ if (!key) {
+ return NextResponse.json(
+ {
+ error: true,
+ message: `missing GOOGLE_API_KEY in server env vars`,
+ },
+ {
+ status: 401,
+ },
+ );
+ }
+
+ const fetchUrl = `${baseUrl}/${path}?key=${key}`;
+
+ const fetchOptions: RequestInit = {
+ headers: {
+ "Content-Type": "application/json",
+ "Cache-Control": "no-store",
+ },
+ method: req.method,
+ body: req.body,
+ // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
+ redirect: "manual",
+ // @ts-ignore
+ duplex: "half",
+ signal: controller.signal,
+ };
+
+ try {
+ const res = await fetch(fetchUrl, fetchOptions);
+ // to prevent browser prompt for credentials
+ const newHeaders = new Headers(res.headers);
+ newHeaders.delete("www-authenticate");
+ // to disable nginx buffering
+ newHeaders.set("X-Accel-Buffering", "no");
+
+ return new Response(res.body, {
+ status: res.status,
+ statusText: res.statusText,
+ headers: newHeaders,
+ });
+ } finally {
+ clearTimeout(timeoutId);
+ }
+}
+
+export const GET = handle;
+export const POST = handle;
+
+export const runtime = "edge";
+export const preferredRegion = [
+ "arn1",
+ "bom1",
+ "cdg1",
+ "cle1",
+ "cpt1",
+ "dub1",
+ "fra1",
+ "gru1",
+ "hnd1",
+ "iad1",
+ "icn1",
+ "kix1",
+ "lhr1",
+ "pdx1",
+ "sfo1",
+ "sin1",
+ "syd1",
+];
diff --git a/app/client/api.ts b/app/client/api.ts
index c7e33c71..30a220ea 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -1,8 +1,13 @@
import { getClientConfig } from "../config/client";
-import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant";
-import { ChatMessage, ModelType, useAccessStore } from "../store";
+import {
+ ACCESS_CODE_PREFIX,
+ Azure,
+ ModelProvider,
+ ServiceProvider,
+} from "../constant";
+import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
import { ChatGPTApi } from "./platforms/openai";
-
+import { GeminiProApi } from "./platforms/google";
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
@@ -41,6 +46,13 @@ export interface LLMUsage {
export interface LLMModel {
name: string;
available: boolean;
+ provider: LLMModelProvider;
+}
+
+export interface LLMModelProvider {
+ id: string;
+ providerName: string;
+ providerType: string;
}
export abstract class LLMApi {
@@ -73,7 +85,11 @@ interface ChatProvider {
export class ClientApi {
public llm: LLMApi;
- constructor() {
+ constructor(provider: ModelProvider = ModelProvider.GPT) {
+ if (provider === ModelProvider.GeminiPro) {
+ this.llm = new GeminiProApi();
+ return;
+ }
this.llm = new ChatGPTApi();
}
@@ -123,18 +139,21 @@ export class ClientApi {
}
}
-export const api = new ClientApi();
-
export function getHeaders() {
const accessStore = useAccessStore.getState();
const headers: Record = {
"Content-Type": "application/json",
"x-requested-with": "XMLHttpRequest",
};
-
+ const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
+ const isGoogle = modelConfig.model === "gemini-pro";
const isAzure = accessStore.provider === ServiceProvider.Azure;
const authHeader = isAzure ? "api-key" : "Authorization";
- const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
+ const apiKey = isGoogle
+ ? accessStore.googleApiKey
+ : isAzure
+ ? accessStore.azureApiKey
+ : accessStore.openaiApiKey;
const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
const validString = (x: string) => x && x.length > 0;
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
new file mode 100644
index 00000000..ec7d7956
--- /dev/null
+++ b/app/client/platforms/google.ts
@@ -0,0 +1,216 @@
+import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+import {
+ EventStreamContentType,
+ fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import { getClientConfig } from "@/app/config/client";
+import Locale from "../../locales";
+import { getServerSideConfig } from "@/app/config/server";
+export class GeminiProApi implements LLMApi {
+ extractMessage(res: any) {
+ console.log("[Response] gemini-pro response: ", res);
+
+ return (
+ res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+ res?.error?.message ||
+ ""
+ );
+ }
+ async chat(options: ChatOptions): Promise {
+ const messages = options.messages.map((v) => ({
+ role: v.role.replace("assistant", "model").replace("system", "model"),
+ parts: [{ text: v.content }],
+ }));
+
+ const modelConfig = {
+ ...useAppConfig.getState().modelConfig,
+ ...useChatStore.getState().currentSession().mask.modelConfig,
+ ...{
+ model: options.config.model,
+ },
+ };
+ const requestPayload = {
+ contents: messages,
+ generationConfig: {
+ // stopSequences: [
+ // "Title"
+ // ],
+ temperature: modelConfig.temperature,
+ maxOutputTokens: modelConfig.max_tokens,
+ topP: modelConfig.top_p,
+ // "topK": modelConfig.top_k,
+ },
+ // stream: options.config.stream,
+ // model: modelConfig.model,
+ // temperature: modelConfig.temperature,
+ // presence_penalty: modelConfig.presence_penalty,
+ // frequency_penalty: modelConfig.frequency_penalty,
+ // top_p: modelConfig.top_p,
+ // max_tokens: Math.max(modelConfig.max_tokens, 1024),
+ // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
+ };
+
+ console.log("[Request] google payload: ", requestPayload);
+
+ // todo: support stream later
+ const shouldStream = false;
+ const controller = new AbortController();
+ options.onController?.(controller);
+ try {
+ const chatPath = this.path(Google.ChatPath);
+ const chatPayload = {
+ method: "POST",
+ body: JSON.stringify(requestPayload),
+ signal: controller.signal,
+ headers: getHeaders(),
+ };
+
+ // make a fetch request
+ const requestTimeoutId = setTimeout(
+ () => controller.abort(),
+ REQUEST_TIMEOUT_MS,
+ );
+ if (shouldStream) {
+ let responseText = "";
+ let remainText = "";
+ let finished = false;
+
+ // animate response to make it looks smooth
+ function animateResponseText() {
+ if (finished || controller.signal.aborted) {
+ responseText += remainText;
+ console.log("[Response Animation] finished");
+ return;
+ }
+
+ if (remainText.length > 0) {
+ const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+ const fetchText = remainText.slice(0, fetchCount);
+ responseText += fetchText;
+ remainText = remainText.slice(fetchCount);
+ options.onUpdate?.(responseText, fetchText);
+ }
+
+ requestAnimationFrame(animateResponseText);
+ }
+
+ // start animation
+ animateResponseText();
+
+ const finish = () => {
+ if (!finished) {
+ finished = true;
+ options.onFinish(responseText + remainText);
+ }
+ };
+
+ controller.signal.onabort = finish;
+
+ fetchEventSource(chatPath, {
+ ...chatPayload,
+ async onopen(res) {
+ clearTimeout(requestTimeoutId);
+ const contentType = res.headers.get("content-type");
+ console.log(
+ "[OpenAI] request response content type: ",
+ contentType,
+ );
+
+ if (contentType?.startsWith("text/plain")) {
+ responseText = await res.clone().text();
+ return finish();
+ }
+
+ if (
+ !res.ok ||
+ !res.headers
+ .get("content-type")
+ ?.startsWith(EventStreamContentType) ||
+ res.status !== 200
+ ) {
+ const responseTexts = [responseText];
+ let extraInfo = await res.clone().text();
+ try {
+ const resJson = await res.clone().json();
+ extraInfo = prettyObject(resJson);
+ } catch {}
+
+ if (res.status === 401) {
+ responseTexts.push(Locale.Error.Unauthorized);
+ }
+
+ if (extraInfo) {
+ responseTexts.push(extraInfo);
+ }
+
+ responseText = responseTexts.join("\n\n");
+
+ return finish();
+ }
+ },
+ onmessage(msg) {
+ if (msg.data === "[DONE]" || finished) {
+ return finish();
+ }
+ const text = msg.data;
+ try {
+ const json = JSON.parse(text) as {
+ choices: Array<{
+ delta: {
+ content: string;
+ };
+ }>;
+ };
+ const delta = json.choices[0]?.delta?.content;
+ if (delta) {
+ remainText += delta;
+ }
+ } catch (e) {
+ console.error("[Request] parse error", text);
+ }
+ },
+ onclose() {
+ finish();
+ },
+ onerror(e) {
+ options.onError?.(e);
+ throw e;
+ },
+ openWhenHidden: true,
+ });
+ } else {
+ const res = await fetch(chatPath, chatPayload);
+ clearTimeout(requestTimeoutId);
+
+ const resJson = await res.json();
+
+ if (resJson?.promptFeedback?.blockReason) {
+ // being blocked
+ options.onError?.(
+ new Error(
+ "Message is being blocked for reason: " +
+ resJson.promptFeedback.blockReason,
+ ),
+ );
+ }
+ const message = this.extractMessage(resJson);
+ options.onFinish(message);
+ }
+ } catch (e) {
+ console.log("[Request] failed to make a chat request", e);
+ options.onError?.(e as Error);
+ }
+ }
+ usage(): Promise {
+ throw new Error("Method not implemented.");
+ }
+ async models(): Promise {
+ return [];
+ }
+ path(path: string): string {
+ return "/api/google/" + path;
+ }
+}
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 8ea86469..68a0fda7 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -323,6 +323,11 @@ export class ChatGPTApi implements LLMApi {
return chatModels.map((m) => ({
name: m.id,
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
}));
}
}
diff --git a/app/components/auth.tsx b/app/components/auth.tsx
index 7962d46b..57118349 100644
--- a/app/components/auth.tsx
+++ b/app/components/auth.tsx
@@ -64,6 +64,17 @@ export function AuthPage() {
);
}}
/>
+ {
+ accessStore.update(
+ (access) => (access.googleApiKey = e.currentTarget.value),
+ );
+ }}
+ />
>
) : null}
diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx
index 8cae7ac9..dff17e4a 100644
--- a/app/components/exporter.tsx
+++ b/app/components/exporter.tsx
@@ -29,10 +29,11 @@ import NextImage from "next/image";
import { toBlob, toPng } from "html-to-image";
import { DEFAULT_MASK_AVATAR } from "../store/mask";
-import { api } from "../client/api";
+
import { prettyObject } from "../utils/format";
-import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
+import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant";
import { getClientConfig } from "../config/client";
+import { ClientApi } from "../client/api";
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => ,
@@ -301,10 +302,17 @@ export function PreviewActions(props: {
}) {
const [loading, setLoading] = useState(false);
const [shouldExport, setShouldExport] = useState(false);
-
+ const config = useAppConfig();
const onRenderMsgs = (msgs: ChatMessage[]) => {
setShouldExport(false);
+ var api: ClientApi;
+ if (config.modelConfig.model === "gemini-pro") {
+ api = new ClientApi(ModelProvider.GeminiPro);
+ } else {
+ api = new ClientApi(ModelProvider.GPT);
+ }
+
api
.share(msgs)
.then((res) => {
diff --git a/app/components/home.tsx b/app/components/home.tsx
index 811cbdf5..4be7da0f 100644
--- a/app/components/home.tsx
+++ b/app/components/home.tsx
@@ -12,7 +12,7 @@ import LoadingIcon from "../icons/three-dots.svg";
import { getCSSVar, useMobileScreen } from "../utils";
import dynamic from "next/dynamic";
-import { Path, SlotID } from "../constant";
+import { ModelProvider, Path, SlotID } from "../constant";
import { ErrorBoundary } from "./error";
import { getISOLang, getLang } from "../locales";
@@ -27,7 +27,7 @@ import { SideBar } from "./sidebar";
import { useAppConfig } from "../store/config";
import { AuthPage } from "./auth";
import { getClientConfig } from "../config/client";
-import { api } from "../client/api";
+import { ClientApi } from "../client/api";
import { useAccessStore } from "../store";
export function Loading(props: { noLogo?: boolean }) {
@@ -128,7 +128,8 @@ function Screen() {
const isHome = location.pathname === Path.Home;
const isAuth = location.pathname === Path.Auth;
const isMobileScreen = useMobileScreen();
- const shouldTightBorder = getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen);
+ const shouldTightBorder =
+ getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen);
useEffect(() => {
loadAsyncGoogleFont();
@@ -169,6 +170,12 @@ function Screen() {
export function useLoadData() {
const config = useAppConfig();
+ var api: ClientApi;
+ if (config.modelConfig.model === "gemini-pro") {
+ api = new ClientApi(ModelProvider.GeminiPro);
+ } else {
+ api = new ClientApi(ModelProvider.GPT);
+ }
useEffect(() => {
(async () => {
const models = await api.llm.models();
diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx
index 214a18c7..b9f81167 100644
--- a/app/components/model-config.tsx
+++ b/app/components/model-config.tsx
@@ -29,7 +29,7 @@ export function ModelConfigList(props: {
.filter((v) => v.available)
.map((v, i) => (
))}
@@ -91,79 +91,84 @@ export function ModelConfigList(props: {
}
>
-
- {
- props.updateConfig(
- (config) =>
- (config.presence_penalty =
- ModalConfigValidator.presence_penalty(
- e.currentTarget.valueAsNumber,
- )),
- );
- }}
- >
-
-
- {
- props.updateConfig(
- (config) =>
- (config.frequency_penalty =
- ModalConfigValidator.frequency_penalty(
- e.currentTarget.valueAsNumber,
- )),
- );
- }}
- >
-
+ {props.modelConfig.model === "gemini-pro" ? null : (
+ <>
+
+ {
+ props.updateConfig(
+ (config) =>
+ (config.presence_penalty =
+ ModalConfigValidator.presence_penalty(
+ e.currentTarget.valueAsNumber,
+ )),
+ );
+ }}
+ >
+
-
-
- props.updateConfig(
- (config) =>
- (config.enableInjectSystemPrompts = e.currentTarget.checked),
- )
- }
- >
-
+
+ {
+ props.updateConfig(
+ (config) =>
+ (config.frequency_penalty =
+ ModalConfigValidator.frequency_penalty(
+ e.currentTarget.valueAsNumber,
+ )),
+ );
+ }}
+ >
+
-
-
- props.updateConfig(
- (config) => (config.template = e.currentTarget.value),
- )
- }
- >
-
+
+
+ props.updateConfig(
+ (config) =>
+ (config.enableInjectSystemPrompts =
+ e.currentTarget.checked),
+ )
+ }
+ >
+
+
+
+ props.updateConfig(
+ (config) => (config.template = e.currentTarget.value),
+ )
+ }
+ >
+
+ >
+ )}
{
const isOpenAiUrl = accessStore.openaiUrl.includes(OPENAI_BASE_URL);
+
return (
accessStore.hideBalanceQuery ||
isOpenAiUrl ||
@@ -635,7 +637,8 @@ export function Settings() {
navigate(Path.Home);
}
};
- if (clientConfig?.isApp) { // Force to set custom endpoint to true if it's app
+ if (clientConfig?.isApp) {
+ // Force to set custom endpoint to true if it's app
accessStore.update((state) => {
state.useCustomConfig = true;
});
@@ -997,7 +1000,7 @@ export function Settings() {
/>
>
- ) : (
+ ) : accessStore.provider === "Azure" ? (
<>
>
- )}
+ ) : accessStore.provider === "Google" ? (
+ <>
+
+
+ accessStore.update(
+ (access) =>
+ (access.googleUrl = e.currentTarget.value),
+ )
+ }
+ >
+
+
+ {
+ accessStore.update(
+ (access) =>
+ (access.googleApiKey = e.currentTarget.value),
+ );
+ }}
+ />
+
+
+
+ accessStore.update(
+ (access) =>
+ (access.googleApiVersion =
+ e.currentTarget.value),
+ )
+ }
+ >
+
+ >
+ ) : null}
>
)}
>
diff --git a/app/config/server.ts b/app/config/server.ts
index 2398805a..83c71124 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -26,6 +26,10 @@ declare global {
AZURE_URL?: string; // https://{azure-url}/openai/deployments/{deploy-name}
AZURE_API_KEY?: string;
AZURE_API_VERSION?: string;
+
+ // google only
+ GOOGLE_API_KEY?: string;
+ GOOGLE_URL?: string;
}
}
}
@@ -80,6 +84,9 @@ export const getServerSideConfig = () => {
azureApiKey: process.env.AZURE_API_KEY,
azureApiVersion: process.env.AZURE_API_VERSION,
+ googleApiKey: process.env.GOOGLE_API_KEY,
+ googleUrl: process.env.GOOGLE_URL,
+
needCode: ACCESS_CODES.size > 0,
code: process.env.CODE,
codes: ACCESS_CODES,
diff --git a/app/constant.ts b/app/constant.ts
index 69d5c511..7668381c 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -12,6 +12,8 @@ export const DEFAULT_CORS_HOST = "https://a.nextweb.fun";
export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
export const OPENAI_BASE_URL = "https://api.openai.com";
+export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";
+
export enum Path {
Home = "/",
Chat = "/chat",
@@ -65,6 +67,12 @@ export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
export enum ServiceProvider {
OpenAI = "OpenAI",
Azure = "Azure",
+ Google = "Google",
+}
+
+export enum ModelProvider {
+ GPT = "GPT",
+ GeminiPro = "GeminiPro",
}
export const OpenaiPath = {
@@ -78,6 +86,14 @@ export const Azure = {
ExampleEndpoint: "https://{resource-url}/openai/deployments/{deploy-id}",
};
+export const Google = {
+ ExampleEndpoint:
+ "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
+ ChatPath: "v1beta/models/gemini-pro:generateContent",
+
+ // /api/openai/v1/chat/completions
+};
+
export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
export const DEFAULT_SYSTEM_TEMPLATE = `
You are ChatGPT, a large language model trained by OpenAI.
@@ -100,58 +116,137 @@ export const DEFAULT_MODELS = [
{
name: "gpt-4",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-4-0314",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-4-0613",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-4-32k",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-4-32k-0314",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-4-32k-0613",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-4-1106-preview",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-4-vision-preview",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-3.5-turbo",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-3.5-turbo-0301",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-3.5-turbo-0613",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-3.5-turbo-1106",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-3.5-turbo-16k",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
},
{
name: "gpt-3.5-turbo-16k-0613",
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
+ },
+ {
+ name: "gemini-pro",
+ available: true,
+ provider: {
+ id: "google",
+ providerName: "Google",
+ providerType: "google",
+ },
},
] as const;
diff --git a/app/locales/cn.ts b/app/locales/cn.ts
index 50dd4428..8746047f 100644
--- a/app/locales/cn.ts
+++ b/app/locales/cn.ts
@@ -13,7 +13,7 @@ const cn = {
Auth: {
Title: "需要密码",
Tips: "管理员开启了密码验证,请在下方填入访问码",
- SubTips: "或者输入你的 OpenAI API 密钥",
+ SubTips: "或者输入你的 OpenAI 或 Google API 密钥",
Input: "在此处填写访问码",
Confirm: "确认",
Later: "稍后再说",
@@ -312,6 +312,23 @@ const cn = {
SubTitle: "选择指定的部分版本",
},
},
+ Google: {
+ ApiKey: {
+ Title: "接口密钥",
+ SubTitle: "使用自定义 Google AI Studio API Key 绕过密码访问限制",
+ Placeholder: "Google AI Studio API Key",
+ },
+
+ Endpoint: {
+ Title: "接口地址",
+ SubTitle: "样例:",
+ },
+
+ ApiVerion: {
+ Title: "接口版本 (gemini-pro api version)",
+ SubTitle: "选择指定的部分版本",
+ },
+ },
CustomModel: {
Title: "自定义模型名",
SubTitle: "增加自定义模型可选项,使用英文逗号隔开",
@@ -347,7 +364,7 @@ const cn = {
Prompt: {
History: (content: string) => "这是历史聊天总结作为前情提要:" + content,
Topic:
- "使用四到五个字直接返回这句话的简要主题,不要解释、不要标点、不要语气词、不要多余文本,如果没有主题,请直接返回“闲聊”",
+ "使用四到五个字直接返回这句话的简要主题,不要解释、不要标点、不要语气词、不要多余文本,不要加粗,如果没有主题,请直接返回“闲聊”",
Summarize:
"简要总结一下对话内容,用作后续的上下文提示 prompt,控制在 200 字以内",
},
@@ -441,9 +458,9 @@ const cn = {
Config: "配置",
},
Exporter: {
- Description : {
- Title: "只有清除上下文之后的消息会被展示"
- },
+ Description: {
+ Title: "只有清除上下文之后的消息会被展示",
+ },
Model: "模型",
Messages: "消息",
Topic: "主题",
diff --git a/app/locales/en.ts b/app/locales/en.ts
index f90cffd4..367161d6 100644
--- a/app/locales/en.ts
+++ b/app/locales/en.ts
@@ -15,7 +15,7 @@ const en: LocaleType = {
Auth: {
Title: "Need Access Code",
Tips: "Please enter access code below",
- SubTips: "Or enter your OpenAI API Key",
+ SubTips: "Or enter your OpenAI or Google API Key",
Input: "access code",
Confirm: "Confirm",
Later: "Later",
@@ -319,6 +319,24 @@ const en: LocaleType = {
Title: "Custom Models",
SubTitle: "Custom model options, seperated by comma",
},
+ Google: {
+ ApiKey: {
+ Title: "API Key",
+ SubTitle:
+ "Bypass password access restrictions using a custom Google AI Studio API Key",
+ Placeholder: "Google AI Studio API Key",
+ },
+
+ Endpoint: {
+ Title: "Endpoint Address",
+ SubTitle: "Example:",
+ },
+
+ ApiVerion: {
+ Title: "API Version (gemini-pro api version)",
+ SubTitle: "Select a specific part version",
+ },
+ },
},
Model: "Model",
@@ -353,7 +371,7 @@ const en: LocaleType = {
History: (content: string) =>
"This is a summary of the chat history as a recap: " + content,
Topic:
- "Please generate a four to five word title summarizing our conversation without any lead-in, punctuation, quotation marks, periods, symbols, or additional text. Remove enclosing quotation marks.",
+ "Please generate a four to five word title summarizing our conversation without any lead-in, punctuation, quotation marks, periods, symbols, bold text, or additional text. Remove enclosing quotation marks.",
Summarize:
"Summarize the discussion briefly in 200 words or less to use as a prompt for future context.",
},
@@ -443,8 +461,8 @@ const en: LocaleType = {
},
Exporter: {
Description: {
- Title: "Only messages after clearing the context will be displayed"
- },
+ Title: "Only messages after clearing the context will be displayed",
+ },
Model: "Model",
Messages: "Messages",
Topic: "Topic",
diff --git a/app/store/access.ts b/app/store/access.ts
index 3b9008ba..9e8024a6 100644
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -29,6 +29,11 @@ const DEFAULT_ACCESS_STATE = {
azureApiKey: "",
azureApiVersion: "2023-08-01-preview",
+ // google ai studio
+ googleUrl: "",
+ googleApiKey: "",
+ googleApiVersion: "v1",
+
// server config
needCode: true,
hideUserApiKey: false,
@@ -56,6 +61,10 @@ export const useAccessStore = createPersistStore(
return ensure(get(), ["azureUrl", "azureApiKey", "azureApiVersion"]);
},
+ isValidGoogle() {
+ return ensure(get(), ["googleApiKey"]);
+ },
+
isAuthorized() {
this.fetch();
@@ -63,6 +72,7 @@ export const useAccessStore = createPersistStore(
return (
this.isValidOpenAI() ||
this.isValidAzure() ||
+ this.isValidGoogle() ||
!this.enabledAccessControl() ||
(this.enabledAccessControl() && ensure(get(), ["accessCode"]))
);
@@ -99,6 +109,7 @@ export const useAccessStore = createPersistStore(
token: string;
openaiApiKey: string;
azureApiVersion: string;
+ googleApiKey: string;
};
state.openaiApiKey = state.token;
state.azureApiVersion = "2023-08-01-preview";
diff --git a/app/store/chat.ts b/app/store/chat.ts
index 66a39d2b..1dcf4e64 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -8,10 +8,11 @@ import {
DEFAULT_INPUT_TEMPLATE,
DEFAULT_SYSTEM_TEMPLATE,
KnowledgeCutOffDate,
+ ModelProvider,
StoreKey,
SUMMARIZE_MODEL,
} from "../constant";
-import { api, RequestMessage } from "../client/api";
+import { ClientApi, RequestMessage } from "../client/api";
import { ChatControllerPool } from "../client/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
@@ -301,6 +302,13 @@ export const useChatStore = createPersistStore(
]);
});
+      let api: ClientApi;
+ if (modelConfig.model === "gemini-pro") {
+ api = new ClientApi(ModelProvider.GeminiPro);
+ } else {
+ api = new ClientApi(ModelProvider.GPT);
+ }
+
// make request
api.llm.chat({
messages: sendMessages,
@@ -379,22 +387,26 @@ export const useChatStore = createPersistStore(
// system prompts, to get close to OpenAI Web ChatGPT
const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts;
- const systemPrompts = shouldInjectSystemPrompts
- ? [
- createMessage({
- role: "system",
- content: fillTemplateWith("", {
- ...modelConfig,
- template: DEFAULT_SYSTEM_TEMPLATE,
+
+      let systemPrompts: ChatMessage[] = [];
+ if (modelConfig.model !== "gemini-pro") {
+ systemPrompts = shouldInjectSystemPrompts
+ ? [
+ createMessage({
+ role: "system",
+ content: fillTemplateWith("", {
+ ...modelConfig,
+ template: DEFAULT_SYSTEM_TEMPLATE,
+ }),
}),
- }),
- ]
- : [];
- if (shouldInjectSystemPrompts) {
- console.log(
- "[Global System Prompt] ",
- systemPrompts.at(0)?.content ?? "empty",
- );
+ ]
+ : [];
+ if (shouldInjectSystemPrompts) {
+ console.log(
+ "[Global System Prompt] ",
+ systemPrompts.at(0)?.content ?? "empty",
+ );
+ }
}
// long term memory
@@ -473,6 +485,14 @@ export const useChatStore = createPersistStore(
summarizeSession() {
const config = useAppConfig.getState();
const session = get().currentSession();
+ const modelConfig = session.mask.modelConfig;
+
+      let api: ClientApi;
+ if (modelConfig.model === "gemini-pro") {
+ api = new ClientApi(ModelProvider.GeminiPro);
+ } else {
+ api = new ClientApi(ModelProvider.GPT);
+ }
// remove error messages if any
const messages = session.messages;
@@ -504,8 +524,6 @@ export const useChatStore = createPersistStore(
},
});
}
-
- const modelConfig = session.mask.modelConfig;
const summarizeIndex = Math.max(
session.lastSummarizeIndex,
session.clearContextIndex ?? 0,
diff --git a/app/store/update.ts b/app/store/update.ts
index 2ab7ec19..7253caff 100644
--- a/app/store/update.ts
+++ b/app/store/update.ts
@@ -1,9 +1,14 @@
-import { FETCH_COMMIT_URL, FETCH_TAG_URL, StoreKey } from "../constant";
-import { api } from "../client/api";
+import {
+  FETCH_COMMIT_URL,
+  FETCH_TAG_URL,
+  ModelProvider,
+  StoreKey,
+} from "../constant";
 import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
 import ChatGptIcon from "../icons/chatgpt.png";
 import Locale from "../locales";
+import { ClientApi } from "../client/api";
const ONE_MINUTE = 60 * 1000;
const isApp = !!getClientConfig()?.isApp;
@@ -85,35 +92,40 @@ export const useUpdateStore = createPersistStore(
}));
if (window.__TAURI__?.notification && isApp) {
// Check if notification permission is granted
- await window.__TAURI__?.notification.isPermissionGranted().then((granted) => {
- if (!granted) {
- return;
- } else {
- // Request permission to show notifications
- window.__TAURI__?.notification.requestPermission().then((permission) => {
- if (permission === 'granted') {
- if (version === remoteId) {
- // Show a notification using Tauri
- window.__TAURI__?.notification.sendNotification({
- title: "NextChat",
- body: `${Locale.Settings.Update.IsLatest}`,
- icon: `${ChatGptIcon.src}`,
- sound: "Default"
- });
- } else {
- const updateMessage = Locale.Settings.Update.FoundUpdate(`${remoteId}`);
- // Show a notification for the new version using Tauri
- window.__TAURI__?.notification.sendNotification({
- title: "NextChat",
- body: updateMessage,
- icon: `${ChatGptIcon.src}`,
- sound: "Default"
- });
- }
- }
- });
- }
- });
+ await window.__TAURI__?.notification
+ .isPermissionGranted()
+ .then((granted) => {
+ if (!granted) {
+ return;
+ } else {
+ // Request permission to show notifications
+ window.__TAURI__?.notification
+ .requestPermission()
+ .then((permission) => {
+ if (permission === "granted") {
+ if (version === remoteId) {
+ // Show a notification using Tauri
+ window.__TAURI__?.notification.sendNotification({
+ title: "NextChat",
+ body: `${Locale.Settings.Update.IsLatest}`,
+ icon: `${ChatGptIcon.src}`,
+ sound: "Default",
+ });
+ } else {
+ const updateMessage =
+ Locale.Settings.Update.FoundUpdate(`${remoteId}`);
+ // Show a notification for the new version using Tauri
+ window.__TAURI__?.notification.sendNotification({
+ title: "NextChat",
+ body: updateMessage,
+ icon: `${ChatGptIcon.src}`,
+ sound: "Default",
+ });
+ }
+ }
+ });
+ }
+ });
}
console.log("[Got Upstream] ", remoteId);
} catch (error) {
@@ -122,6 +134,7 @@ export const useUpdateStore = createPersistStore(
},
async updateUsage(force = false) {
+ // only support openai for now
const overOneMinute = Date.now() - get().lastUpdateUsage >= ONE_MINUTE;
if (!overOneMinute && !force) return;
@@ -130,6 +143,7 @@ export const useUpdateStore = createPersistStore(
}));
try {
+ const api = new ClientApi(ModelProvider.GPT);
const usage = await api.llm.usage();
if (usage) {
diff --git a/app/utils/model.ts b/app/utils/model.ts
index 74b28a66..c4a4833e 100644
--- a/app/utils/model.ts
+++ b/app/utils/model.ts
@@ -6,7 +6,12 @@ export function collectModelTable(
) {
const modelTable: Record<
string,
- { available: boolean; name: string; displayName: string }
+ {
+ available: boolean;
+ name: string;
+ displayName: string;
+ provider: LLMModel["provider"];
+ }
> = {};
// default models
@@ -37,6 +42,7 @@ export function collectModelTable(
name,
displayName: displayName || name,
available,
+      provider: modelTable[name]?.provider, // may be undefined for custom models absent from the default table
};
});
return modelTable;
diff --git a/docker-compose.yml b/docker-compose.yml
index 57ca12e0..935b126a 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,13 +1,14 @@
version: "3.9"
services:
chatgpt-next-web:
- profiles: ["no-proxy"]
+    profiles: ["no-proxy"]
container_name: chatgpt-next-web
image: yidadaa/chatgpt-next-web
ports:
- 3000:3000
environment:
- OPENAI_API_KEY=$OPENAI_API_KEY
+ - GOOGLE_API_KEY=$GOOGLE_API_KEY
- CODE=$CODE
- BASE_URL=$BASE_URL
- OPENAI_ORG_ID=$OPENAI_ORG_ID
@@ -18,13 +19,14 @@ services:
- OPENAI_SB=$OPENAI_SB
chatgpt-next-web-proxy:
- profiles: ["proxy"]
+    profiles: ["proxy"]
container_name: chatgpt-next-web-proxy
image: yidadaa/chatgpt-next-web
ports:
- 3000:3000
environment:
- OPENAI_API_KEY=$OPENAI_API_KEY
+ - GOOGLE_API_KEY=$GOOGLE_API_KEY
- CODE=$CODE
- PROXY_URL=$PROXY_URL
- BASE_URL=$BASE_URL