As is well known, recent NextChat updates added many routing modes, including support for additional gateways (such as Claude and Gemini).
This effectively takes over part of One API's job.
However, I don't need it to be that versatile. I like One API / New API; it already manages all my channels well.
So all I ask of NextChat is compatibility with the OpenAI channel; I don't care about the rest.
Hence this simple script. It can be deployed on serverless platforms such as Deno and Cloudflare Workers, and it has the following features:
1. Minimal code by design: it only supports OpenAI-format routers (such as One API / New API).
2. Its static assets are served from another site (here, the official NeatChat demo site), but they are cached, which improves performance and puts no load at all on the origin. (Deno's cache lasts one month.)
3. It enforces a strict CSP: the frontend may only make same-origin requests, which keeps your credentials safe.
Deploy the script on Workers (or on Deno, whose subdomains are directly reachable).
The look and feel is identical to the official build, since it is the official static assets plus a rewritten router; R1 chain-of-thought display is supported too.
When the official site's assets are updated, you can force a cache refresh with https://your_host/?refresh=1.
async function handler(req, res) {
var CONFIG = {
"neatchat_base_url": "https://neat.tz889.us.kg", // 静态资源反代地址(这里是neatchat演示站),有Cache机制(会在deno缓存一个月),不会对源站造成影响。有CSP机制确保跨站安全。
"password": "1234", // 当前站点密钥,由你定义,请妥善保存
"one_api_base_url": "https://one.api.com", // one_api / new_api 服务器地址 【不需要】末尾的 [ / ] ❌ 或 [ /v1 ] ❌
"one_api_key": "sk-xxxx", // one_api / new_api 密钥
"models": "gpt-4-turbo,deepseek-r1" // 模型支持,第一个是默认模型
}
var fetch = (url, opt = {}) => { opt.signal = req.signal; return globalThis.fetch(url, opt) }; // Propagate the client's abort signal so every upstream fetch is cancelled when the client cancels
async function unsafe_fetch_with_cache(url, opt) {
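// Serves from the platform Cache API when possible; pass { refresh: true } to bypass and overwrite the cached copy.
// "unsafe" because any 200 response is cached regardless of its own caching headers.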
const cache = await caches.open("my-cache");
var req = new Request(url);
if (opt && opt.refresh) {
delete opt.refresh;
} else {
const cached = await cache.match(req);
if (cached) {
console.log("命中缓存", url)
return cached;
}
}
const res = await fetch(url, opt)
if (res.status === 200) {
await cache.put(req, res.clone());
}
return res;
}
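// Extract the path portion of the request URL (between host and query string)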
var path = req.url.match(/^https?:\/\/.*?(\/[^\?]*?)(\?|$)/)[1];
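// Intercept NextChat's config endpoint and rewrite its model list to the channels defined in CONFIG.models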
if (path === "/api/config") {
var config = await unsafe_fetch_with_cache(CONFIG.neatchat_base_url + path, { refresh: (req.headers.get('referer') + req.url).includes("refresh=1") || undefined });
config = await config.json()
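// "-all" hides every built-in model, then each configured model is re-added and pinned to the OpenAI provider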
config.customModels = "-all," + CONFIG.models.split(",").map(e => `+${e}@OpenAI`).join(',')
return new Response(JSON.stringify(config), { headers: { 'content-type': 'application/json' } })
}
if (path.indexOf("/api/openai/") === 0) {
// openai api
var body = ["GET", "HEAD"].includes(req.method) ? undefined : JSON.stringify(await req.json()); // GET/HEAD requests have no body to parse
path = path.replace(/^\/api\/openai/i, "")
var api_host = CONFIG.one_api_base_url
var Authorization = req.headers.get('Authorization')
var headers = { 'Content-Type': 'application/json' }
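// Swap the client's site password for the real upstream key before forwarding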
if (Authorization?.indexOf(CONFIG.password) > -1) {
if (CONFIG.one_api_key) headers.Authorization = "Bearer " + CONFIG.one_api_key;
} else {
// If auth fails, you could instead try the presented key directly against api.openai.com:
// api_host = "https://api.openai.com"
return new Response("Auth Error!", { status: 401 })
}
return await fetch(api_host + path, {
headers,
method: req.method,
body
})
}
/*
// Because the CSP forbids cross-origin requests, the frontend cannot point at another base_url by default.
// This block reverse-proxies any address (https://your_host/proxy/https://api.openai.com/),
// letting the frontend bypass the same-origin policy. A universal reverse proxy; disabled by default.
if (path.indexOf("/proxy/") === 0) {
var proxyURL = ((req.url.match(/\/proxy\/(https?:\/?\/?.*?$)/i) || [])[1] || "").replace(/\/\/?/, "//");
var newHeaders = new Headers(req.headers);
newHeaders.delete('host');
newHeaders.delete('traceparent');
return await fetch(proxyURL, {
headers: newHeaders,
method: req.method,
body: req.body,
});
}*/
var resp = await unsafe_fetch_with_cache(CONFIG.neatchat_base_url + path, { refresh: (req.headers.get('referer') + req.url).includes("refresh=1") || undefined });
var content_type = resp.headers.get('content-type') || ""
var headers = {
'content-type': content_type,
// Lock the frontend to same-origin requests; the wss entries allow the built-in speech features
"Content-Security-Policy": "default-src 'self' 'unsafe-inline'; img-src 'self' data:; script-src 'self' 'unsafe-inline'; object-src 'none'; connect-src 'self' wss://speech.platform.bing.com wss://api.openai.com"
}
// Non-HTML assets outside /api are immutable, so let browsers cache them for a year
if (content_type.toLowerCase().indexOf("html") === -1 && path.indexOf("/api") !== 0) {
headers["Cache-Control"] = "max-age=31536000, immutable"
}
return new Response(resp.body, { headers })
}
; (async () => {
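// Runtime detection: use the platform-native server when one exists,
// otherwise fall back to a Node http server that adapts req/res to the Fetch API.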
if (typeof Deno !== 'undefined') {
//For Deno
try {
var port = Number(Deno.env.get("PORT")) || 8000; // Deno.env.get may throw without --allow-env
} catch (e) { port = 8000; }
return Deno.serve({ port }, handler);
}
if (typeof EdgeRuntime !== 'undefined') {
//For vercel edge serverless
return
}
if (typeof addEventListener === "function") {
//For Cloudflare Workers
return
}
//For Nodejs
const { Readable } = await import('stream');
const { pipeline } = await import('stream/promises');
const http = await import('http');
http.createServer(async (req, res) => {
try {
var abort_signal = new AbortController();
const request = new Request(`http://${req.headers.host}${req.url}`, {
method: req.method,
headers: req.headers,
body: ["GET", "HEAD"].includes(req.method) ? undefined : Readable.toWeb(req),
signal: abort_signal.signal,
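// duplex: "half" is required by Node's fetch implementation (undici) whenever a streaming request body is supplied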
duplex: "half"
});
req.on('close', () => {
abort_signal.abort();
});
const response = await handler(request, res);
if (response instanceof Response) {
try {
response.headers.forEach((value, name) => {
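// fetch() has already decompressed the body, so forwarding content-encoding would corrupt it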
if (name === "content-encoding") return;
res.setHeader(name, value);
});
res.statusCode = response.status;
if (!response.body) { res.end(); return; } // e.g. HEAD or empty responses
const nodeStream = Readable.fromWeb(response.body);
// Handle client disconnects: abort upstream fetches and tear down the stream
res.on('close', () => {
abort_signal.abort();
nodeStream.destroy()
});
// Stream the body to the client; on failure, send a 500 if headers have not gone out yet
await pipeline(nodeStream, res).catch(err => {
if (!res.headersSent) {
res.statusCode = 500;
res.end();
}
});
} catch (e) {
console.log(e.message)
}
}
} catch (e) {
console.error(e);
if (!res.headersSent) {
res.statusCode = 500;
res.end('Internal Server Error');
}
}
}).listen(process.env.PORT || 8000, () => {
console.log(`Listening on http://localhost:${process.env.PORT || 8000}`);
});
})()
//For vercel edge serverless - START
export const config = {
runtime: 'edge',
regions: ['hkg1'],
}
export const GET = handler
export const POST = handler
export const PUT = handler
export const PATCH = handler
export const DELETE = handler
export const HEAD = handler
export const OPTIONS = handler
//For vercel edge serverless - END
//For Cloudflare Pages Function - START
export function onRequest(context) {
return handler(context.request)
}
//For Cloudflare Pages Function - END
//For Cloudflare Workers Function - START
export default {
fetch(req, env, ctx) {
return handler(req);
}
}
//For Cloudflare Workers Function - END
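To smoke-test a deployment, you can send a request shaped like the ones NextChat itself issues. A minimal sketch, assuming the script is reachable at https://your_host (hypothetical) and the password is still the default 1234:

// Hypothetical host and default password from CONFIG; adjust both to your deployment.
const res = await fetch("https://your_host/api/openai/v1/chat/completions", {
    method: "POST",
    headers: {
        "Content-Type": "application/json",
        "Authorization": "Bearer 1234" // must contain CONFIG.password; the proxy swaps it for one_api_key
    },
    body: JSON.stringify({
        model: "gpt-4-turbo", // any entry from CONFIG.models
        messages: [{ role: "user", content: "ping" }]
    })
});
console.log(res.status, await res.text());

A 200 with a chat completion means the password check, the /api/openai path rewrite, and the One API upstream are all wired up; a 401 "Auth Error!" means the Authorization header does not contain your password.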