由于旧帖子无法编辑,只能开新帖更新,旧帖请访问以下链接
【第一弹】【更新】用不完,根本用不完,部署cf worker无限免费绘画,可Api,支持多模型切换 - 资源荟萃 - LINUX DO
【第二弹】用不完,根本用不完,部署cf worker无限免费绘画,可Api,支持多模型切换 - 资源荟萃 - LINUX DO
准备工作
1、注册硅基流动,并生成api token
2、注册cloudflare
3、复制以下代码部署到cloudflare workers中即可
10月3日更新
1、移除已下线的绘图模型
2、增加---ntl(强制关闭翻译),---tl(强制翻译)参数可强制开启或关闭提示词优化翻译
3、增加测速模型,避免one-api/new-api测速时直接调用画图接口
4、优化自定义图像大小,可使用参数:---1:1,---1:2,---3:2,---4:3,---16:9,---9:16
我是隐藏代码
// API key this worker requires from its own clients, to prevent abuse.
const API_KEY = "sk-1234567890";
// Pool of SiliconFlow tokens; one is picked at random for every upstream request.
const SILICONFLOW_TOKEN_LIST = ["sk-xxxxxxxxx"];
// Whether to translate/optimize prompts before image generation (default;
// can be overridden per request with the ---tl / ---ntl flags).
const SILICONFLOW_IS_TRANSLATE = true;
// Chat model used for prompt translation/optimization.
const SILICONFLOW_TRANSLATE_MODEL = "Qwen/Qwen2-7B-Instruct";
// Model map defining what clients can request. one-api/new-api can use their
// "fetch model list" feature when adding this worker as a channel to add all
// of these in one click.
// "test" is a dummy speed-test model so that one-api/new-api latency probes
// do not hit the real image-generation endpoint.
const CUSTOMER_MODEL_MAP = {
  "test": {
    body: {
      model: "test"
    }
  },
  "FLUX.1-schnell": {
    // Text-to-image only; does not accept a source image.
    isImage2Image: false,
    // Template request body sent to SiliconFlow for this model.
    body: {
      model: "black-forest-labs/FLUX.1-schnell",
      prompt: "",
      image_size: "1024x1024",
      seed: 1
    },
    // Maps the "---W:H" prompt flag to a concrete pixel size for this model.
    RATIO_MAP: {
      "1:1": "1024x1024",
      "1:2": "512x1024",
      "3:2": "768x512",
      "4:3": "768x1024",
      "16:9": "1024x576",
      "9:16": "576x1024"
    }
  },
  "stable-diffusion-xl-base-1.0": {
    // Supports image-to-image: a source image URL in the prompt is inlined
    // as base64 into the request body.
    isImage2Image: true,
    body: {
      model: "stabilityai/stable-diffusion-xl-base-1.0",
      prompt: "",
      image_size: "1024x1024",
      seed: 1,
      batch_size: 1,
      num_inference_steps: 20,
      guidance_scale: 7.5,
      image: ""
    },
    RATIO_MAP: {
      "1:1": "1024x1024",
      "1:2": "1024x2048",
      "3:2": "1536x1024",
      "4:3": "1536x2048",
      "16:9": "2048x1152",
      "9:16": "1152x2048"
    }
  },
  "stable-diffusion-2-1": {
    isImage2Image: true,
    body: {
      model: "stabilityai/stable-diffusion-2-1",
      prompt: "",
      image_size: "512x512",
      seed: 1,
      batch_size: 1,
      num_inference_steps: 20,
      guidance_scale: 7.5,
      image: ""
    },
    RATIO_MAP: {
      "1:1": "512x512",
      "1:2": "512x1024",
      "3:2": "768x512",
      "4:3": "768x1024",
      "16:9": "1024x576",
      "9:16": "576x1024"
    }
  },
};
// Worker entry point: authenticates the caller, routes /v1/models and
// /v1/chat/completions, and turns an OpenAI-style chat request into a
// SiliconFlow image generation.
async function handleRequest(request) {
  try {
    // CORS preflight — answer immediately with no body.
    if (request.method === "OPTIONS") {
      return getResponse("", 204);
    }
    // Require "Authorization: Bearer <API_KEY>".
    const authHeader = request.headers.get("Authorization");
    if (!authHeader || !authHeader.startsWith("Bearer ") || authHeader.split(" ")[1] !== API_KEY) {
      return getResponse("Unauthorized", 401);
    }
    // Route on the URL path so a trailing query string cannot break matching
    // (the original matched endsWith() against the full URL).
    const pathname = new URL(request.url).pathname;
    if (pathname.endsWith("/v1/models")) {
      // Model listing for one-api/new-api "fetch model list".
      const models = Object.keys(CUSTOMER_MODEL_MAP).map((id) => ({
        id,
        object: "model"
      }));
      return getResponse(JSON.stringify({
        data: models,
        success: true
      }), 200);
    }
    if (request.method !== "POST") {
      return getResponse("Only POST requests are allowed", 405);
    }
    if (!pathname.endsWith("/v1/chat/completions")) {
      return getResponse("Not Found", 404);
    }
    const data = await request.json();
    const messages = data.messages || [];
    // Unknown model names fall back to FLUX.1-schnell.
    const modelInfo = CUSTOMER_MODEL_MAP[data.model] || CUSTOMER_MODEL_MAP["FLUX.1-schnell"];
    const stream = data.stream || false;
    // Most recent user message. Copy before reversing so the caller's array
    // is not mutated (the original reversed data.messages in place).
    const userMessage = [...messages].reverse().find((msg) => msg.role === "user")?.content;
    if (!userMessage) {
      return getResponse(JSON.stringify({
        error: "未找到用户消息"
      }), 400);
    }
    // The "test" model short-circuits: echo a reply without generating.
    if (modelInfo.body.model === "test") {
      return stream ?
        handleStreamResponse(userMessage, "", "", data.model, "") :
        handleNonStreamResponse(userMessage, "", "", data.model, "");
    }
    // Parse the control flags out of the prompt text.
    const is_translate = extractTranslate(userMessage);
    const size = extractImageSize(userMessage, modelInfo.RATIO_MAP);
    const imageUrl = extractImageUrl(userMessage);
    const originalPrompt = cleanPromptString(userMessage);
    const translatedPrompt = is_translate ? await getPrompt(originalPrompt) : originalPrompt;
    let url;
    if (imageUrl) {
      // Image-to-image: inline the source image as a base64 data URL.
      const base64 = await convertImageToBase64(imageUrl);
      url = await generateImage(translatedPrompt, base64, modelInfo, size);
    } else {
      url = await generateImage(translatedPrompt, "", modelInfo, size);
    }
    if (!url) {
      // Placeholder shown when generation produced no URL.
      url = "https://pic.netbian.com/uploads/allimg/240808/192001-17231160015724.jpg";
    }
    return stream ?
      handleStreamResponse(originalPrompt, translatedPrompt, size, data.model, url) :
      handleNonStreamResponse(originalPrompt, translatedPrompt, size, data.model, url);
  } catch (error) {
    return getResponse(JSON.stringify({
      error: `处理请求失败: ${error.message}`
    }), 500);
  }
}
// Build the request body for SiliconFlow's image-generation endpoint and
// return the URL of the generated image.
//
// translatedPrompt — final (possibly translated) text prompt.
// base64Image      — data-URL of the source image for image-to-image models, or "".
// modelInfo        — entry from CUSTOMER_MODEL_MAP.
// imageSize        — "WxH" string resolved from the requested aspect ratio.
async function generateImage(translatedPrompt, base64Image, modelInfo, imageSize) {
  // Copy the template instead of mutating the shared CUSTOMER_MODEL_MAP body:
  // the original code leaked prompt/image state between requests.
  const jsonBody = { ...modelInfo.body };
  jsonBody.prompt = translatedPrompt;
  // BUG FIX: the API expects snake_case "image_size"; the old code wrote a
  // stray "imageSize" property, so the requested ratio was silently ignored
  // and the template's default size was always used.
  jsonBody.image_size = imageSize;
  if (modelInfo.isImage2Image && base64Image) {
    jsonBody.image = base64Image;
  }
  return await getImageUrl("https://api.siliconflow.cn/v1/image/generations", jsonBody);
}
// Build a single-chunk, OpenAI-compatible SSE (chat.completion.chunk)
// response whose delta content embeds the generated image as Markdown.
function handleStreamResponse(originalPrompt, translatedPrompt, size, model, imageUrl) {
  const uniqueId = `chatcmpl-${Date.now()}`;
  const createdTimestamp = Math.floor(Date.now() / 1000);
  const systemFingerprint = "fp_" + Math.random().toString(36).substr(2, 9);
  // BUG FIX: the original appended an empty string here, so the imageUrl
  // argument was never shown to the client. Embed it as a Markdown image
  // (skipped for the "test" model, which supplies an empty URL).
  const content = `🎨 原始提示词:${originalPrompt}\n` +
    `🌐 翻译后的提示词:${translatedPrompt}\n` +
    `📐 图像规格:${size}\n` +
    `🌟 图像生成成功!\n` +
    `以下是结果:\n\n` +
    (imageUrl ? `![image](${imageUrl})` : ``);
  const responsePayload = {
    id: uniqueId,
    object: "chat.completion.chunk",
    created: createdTimestamp,
    model: model,
    system_fingerprint: systemFingerprint,
    choices: [{
      index: 0,
      delta: {
        content: content,
      },
      finish_reason: "stop",
    }, ],
  };
  const dataString = JSON.stringify(responsePayload);
  // Terminate the stream with the OpenAI "[DONE]" sentinel so SSE clients
  // know the completion has finished.
  return new Response(`data: ${dataString}\n\ndata: [DONE]\n\n`, {
    status: 200,
    headers: {
      "Content-Type": "text/event-stream",
      'Access-Control-Allow-Origin': '*',
      "Access-Control-Allow-Headers": '*',
    },
  });
}
// Build a non-streaming, OpenAI-compatible chat.completion response whose
// assistant message embeds the generated image as Markdown.
function handleNonStreamResponse(originalPrompt, translatedPrompt, size, model, imageUrl) {
  const uniqueId = `chatcmpl-${Date.now()}`;
  const createdTimestamp = Math.floor(Date.now() / 1000);
  const systemFingerprint = "fp_" + Math.random().toString(36).substr(2, 9);
  // BUG FIX: the original appended an empty string here, so the imageUrl
  // argument was never shown to the client. Embed it as a Markdown image
  // (skipped for the "test" model, which supplies an empty URL).
  const content = `🎨 原始提示词:${originalPrompt}\n` +
    `🌐 翻译后的提示词:${translatedPrompt}\n` +
    `📐 图像规格:${size}\n` +
    `🌟 图像生成成功!\n` +
    `以下是结果:\n\n` +
    (imageUrl ? `![image](${imageUrl})` : ``);
  const response = {
    id: uniqueId,
    object: "chat.completion",
    created: createdTimestamp,
    model: model,
    system_fingerprint: systemFingerprint,
    choices: [{
      index: 0,
      message: {
        role: "assistant",
        content: content
      },
      finish_reason: "stop"
    }],
    // Token counts are approximated by character length — no tokenizer here.
    usage: {
      prompt_tokens: translatedPrompt.length,
      completion_tokens: content.length,
      total_tokens: translatedPrompt.length + content.length
    }
  };
  return getResponse(JSON.stringify(response), 200);
}
// Wrap a (pre-serialized) body and status code into a JSON Response that
// carries permissive CORS headers.
function getResponse(resp, status) {
  const headers = {
    'Content-Type': 'application/json',
    'Access-Control-Allow-Origin': '*',
    'Access-Control-Allow-Headers': '*'
  };
  return new Response(resp, { status, headers });
}
// Translate and optimize the user's prompt into an English Stable Diffusion
// prompt via SiliconFlow's chat-completions API.
// Returns the model's reply, or the original prompt unchanged when the
// upstream call is not OK (best-effort, never throws for a bad status).
async function getPrompt(prompt) {
  const requestBodyJson = {
    model: SILICONFLOW_TRANSLATE_MODEL,
    messages: [{
      role: "system",
      // System prompt (Chinese) instructing the model to act as a Stable
      // Diffusion prompt engineer and reply with an English positive prompt
      // only. NOTE: the template literal below is flush-left on purpose —
      // its leading whitespace is part of the string sent to the model.
      content: `作为 Stable Diffusion Prompt 提示词专家,您将从关键词中创建提示,通常来自 Danbooru 等数据库。
提示通常描述图像,使用常见词汇,按重要性排列,并用逗号分隔。避免使用"-"或".",但可以接受空格和自然语言。避免词汇重复。
为了强调关键词,请将其放在括号中以增加其权重。例如,"(flowers)"将'flowers'的权重增加1.1倍,而"(((flowers)))"将其增加1.331倍。使用"(flowers:1.5)"将'flowers'的权重增加1.5倍。只为重要的标签增加权重。
提示包括三个部分:**前缀**(质量标签+风格词+效果器)+ **主题**(图像的主要焦点)+ **场景**(背景、环境)。
* 前缀影响图像质量。像"masterpiece"、"best quality"、"4k"这样的标签可以提高图像的细节。像"illustration"、"lensflare"这样的风格词定义图像的风格。像"bestlighting"、"lensflare"、"depthoffield"这样的效果器会影响光照和深度。
* 主题是图像的主要焦点,如角色或场景。对主题进行详细描述可以确保图像丰富而详细。增加主题的权重以增强其清晰度。对于角色,描述面部、头发、身体、服装、姿势等特征。
* 场景描述环境。没有场景,图像的背景是平淡的,主题显得过大。某些主题本身包含场景(例如建筑物、风景)。像"花草草地"、"阳光"、"河流"这样的环境词可以丰富场景。你的任务是设计图像生成的提示。请按照以下步骤进行操作:
1. 我会发送给您一个图像场景。需要你生成详细的图像描述
2. 图像描述必须是英文,输出为Positive Prompt。
示例:
我发送:二战时期的护士。
您回复只回复:
A WWII-era nurse in a German uniform, holding a wine bottle and stethoscope, sitting at a table in white attire, with a table in the background, masterpiece, best quality, 4k, illustration style, best lighting, depth of field, detailed character, detailed environment.`
    },
    {
      role: "user",
      content: prompt
    }
    ],
    stream: false,
    max_tokens: 512,
    temperature: 0.7,
    top_p: 0.7,
    top_k: 50,
    frequency_penalty: 0.5,
    n: 1
  };
  const apiUrl = "https://api.siliconflow.cn/v1/chat/completions";
  const response = await postRequest(apiUrl, requestBodyJson);
  if (response.ok) {
    const jsonResponse = await response.json();
    // Trust the first choice's content as the optimized prompt.
    const res = jsonResponse.choices[0].message.content;
    return res;
  } else {
    // Upstream failure — fall back to the untranslated prompt.
    return prompt;
  }
}
// POST the generation request and return the URL of the first image in the
// upstream reply. Throws when the upstream answers with a non-2xx status.
async function getImageUrl(apiUrl, jsonBody) {
  const upstream = await postRequest(apiUrl, jsonBody);
  if (upstream.ok) {
    const payload = await upstream.json();
    return payload.images[0].url;
  }
  throw new Error('Unexpected response ' + upstream.status);
}
// POST a JSON body to a SiliconFlow endpoint, authenticating with a token
// drawn at random from SILICONFLOW_TOKEN_LIST. Returns the raw fetch Response.
async function postRequest(apiUrl, jsonBody) {
  const pick = Math.floor(Math.random() * SILICONFLOW_TOKEN_LIST.length);
  const token = SILICONFLOW_TOKEN_LIST[pick];
  const headers = {
    'Authorization': `Bearer ${token}`,
    'Accept': 'application/json',
    'Content-Type': 'application/json'
  };
  return fetch(apiUrl, {
    method: 'POST',
    headers,
    body: JSON.stringify(jsonBody)
  });
}
// Resolve an aspect-ratio flag ("---W:H") in the prompt to a concrete pixel
// size via the model's RATIO_MAP. Defaults to "1024x1024" when the flag is
// absent, the ratio is unsupported, or the model has no RATIO_MAP.
function extractImageSize(prompt, RATIO_MAP) {
  const match = prompt.match(/---(\d+:\d+)/);
  if (!match) {
    return "1024x1024";
  }
  // Optional chaining guards against model entries without a RATIO_MAP
  // (e.g. "test"), which the original code would have crashed on.
  return RATIO_MAP?.[match[1]] ?? "1024x1024";
}
// Pull the first image URL (by common image-file extension) out of the
// prompt; returns null when none is present.
function extractImageUrl(prompt) {
  const found = /(https?:\/\/[^\s]+?\.(?:png|jpe?g|gif|bmp|webp|svg))/i.exec(prompt);
  return found === null ? null : found[0];
}
// Decide whether to translate/optimize the prompt: "---ntl" forces it off,
// "---tl" forces it on, otherwise the global default applies.
function extractTranslate(prompt) {
  const flag = prompt.match(/---n?tl/)?.[0];
  switch (flag) {
    case "---ntl":
      return false;
    case "---tl":
      return true;
    default:
      return SILICONFLOW_IS_TRANSLATE;
  }
}
// Strip the control flags (ratio, translate) and any embedded image URL,
// leaving only the text prompt.
function cleanPromptString(prompt) {
  return prompt
    .replace(/---\d+:\d+/, "")
    .replace(/---n?tl/, "")
    .replace(/https?:\/\/\S+\.(?:png|jpe?g|gif|bmp|webp|svg)/gi, "")
    .trim();
}
// Download an image and return it as a base64 data URL for image-to-image
// requests. Throws when the download fails.
async function convertImageToBase64(imageUrl) {
  const response = await fetch(imageUrl);
  if (!response.ok) {
    throw new Error('Failed to download image');
  }
  // FIX: use the server-reported MIME type instead of hard-coding webp for
  // every image; fall back to the old value when the header is missing.
  const mimeType = response.headers.get('Content-Type')?.split(';')[0].trim() || 'image/webp';
  const arrayBuffer = await response.arrayBuffer();
  const base64Image = arrayBufferToBase64(arrayBuffer);
  return `data:${mimeType};base64,${base64Image}`;
}
// Encode an ArrayBuffer as base64 by building a binary string byte-by-byte
// and handing it to btoa.
function arrayBufferToBase64(buffer) {
  const bytes = new Uint8Array(buffer);
  let binary = '';
  for (const byte of bytes) {
    binary += String.fromCharCode(byte);
  }
  return btoa(binary);
}
// Cloudflare Workers (Service Worker syntax) entry point: answer every
// incoming request with handleRequest.
addEventListener('fetch', (event) => {
  event.respondWith(handleRequest(event.request));
});