Get 1,000 free SD image generations per month with a Cloudflare Worker (API support, multi-key rotation, multiple drawing-style models)

I am only reposting this; I edited it for publication.

This project was developed by johnson, building on the following posts:

[Part 1] [Updated] More than you can ever use up: deploy a CF Worker for unlimited free image generation, API-capable, with multi-model switching - Resources - LINUX DO

[Part 2] More than you can ever use up: deploy a CF Worker for unlimited free image generation, API-capable, with multi-model switching - Resources - LINUX DO

Since johnson is a low-profile expert, I'm publishing this article on his behalf and freeloading the stars! :tieba_013:


1. Register an account at Prodia and obtain a PRODIA_API_KEY.

2. In Cloudflare, under Workers & Pages, create a Worker project and click Edit code:

3. Copy the JS code below into the Worker, adjust the relevant parameters, and deploy.
(Please look up on your own how to obtain the cf_accountID and cf_token.)

API_KEY can be anything you choose, e.g.: sk-123456789

//prodia.com API_KEY
const PRODIA_API_KEY = "XXX";

//API key authorizing calls to this Worker, to prevent malicious use
const API_KEY = "sk-XXX";
//List of Cloudflare accounts; each request randomly picks one account from the list
const CF_ACCOUNT_LIST = [
  { account_id: "cf_accountID", token: "cf_token" }
];
//Append ---ntl to your prompt to force-disable prompt translation/optimization
//Append ---tl to your prompt to force-enable prompt translation/optimization
//Whether prompt translation/optimization is enabled by default
const CF_IS_TRANSLATE = true;
//Model used for prompt translation/optimization
const CF_TRANSLATE_MODEL = "@cf/qwen/qwen1.5-14b-chat-awq";
//Model map defining which models clients can use. When adding a channel, one-api/new-api can use the "fetch model list" feature to add them in one click
const CUSTOMER_MODEL_MAP = {
    "animagineXLV3_v30.safetensors": "animagineXLV3_v30.safetensors [75f2f05b]",
    "devlishphotorealism_sdxl15.safetensors": "devlishphotorealism_sdxl15.safetensors [77cba69f]",
    "dreamshaperXL10_alpha2.safetensors": "dreamshaperXL10_alpha2.safetensors [c8afe2ef]",
    "dynavisionXL_0411.safetensors": "dynavisionXL_0411.safetensors [c39cc051]",
    "juggernautXL_v45.safetensors": "juggernautXL_v45.safetensors [e75f5471]",
    "realismEngineSDXL_v10.safetensors": "realismEngineSDXL_v10.safetensors [af771c3f]",
    "realvisxlV40.safetensors": "realvisxlV40.safetensors [f7fdcb51]",
    "sd_xl_base_1.0.safetensors": "sd_xl_base_1.0.safetensors [be9edd61]",
    "sd_xl_base_1.0_inpainting_0.1.safetensors": "sd_xl_base_1.0_inpainting_0.1.safetensors [5679a81a]",
    "turbovisionXL_v431.safetensors": "turbovisionXL_v431.safetensors [78890989]"
};

/**
 * Handles incoming requests to the Cloudflare Worker.
 * @param {Request} request - The incoming request object.
 * @returns {Response} - The response object.
 * @throws {Error} - If the request is invalid or the response fails.
 */
async function handleRequest(request) {
  try {
    if (request.method === "OPTIONS") {
      return new Response("", {
        status: 204,
        headers: {
          'Access-Control-Allow-Origin': '*',
          "Access-Control-Allow-Headers": '*'
        }
      });
    }

    const authHeader = request.headers.get("Authorization");
    if (!authHeader || !authHeader.startsWith("Bearer ") || authHeader.split(" ")[1] !== API_KEY) {
      return new Response("Unauthorized", { status: 401 });
    }

    if (request.url.endsWith("/v1/models")) {
      const arrs = [];
      Object.keys(CUSTOMER_MODEL_MAP).forEach(element => arrs.push({ id: element, object: "model" }));
      const response = {
        data: arrs,
        success: true
      };
      return new Response(JSON.stringify(response), {
        headers: {
          'Content-Type': 'application/json',
          'Access-Control-Allow-Origin': '*',
          'Access-Control-Allow-Headers': '*'
        }
      });
    }

    if (request.method !== "POST") {
      return new Response("Only POST requests are allowed", {
        status: 405,
        headers: {
          'Access-Control-Allow-Origin': '*',
          "Access-Control-Allow-Headers": '*'
        }
      });
    }

    if (!request.url.endsWith("/v1/chat/completions")) {
      return new Response("Not Found", {
        status: 404,
        headers: {
          'Access-Control-Allow-Origin': '*',
          "Access-Control-Allow-Headers": '*'
        }
      });
    }

    const data = await request.json();
    const messages = data.messages || [];
    // Fall back to a model that actually exists in CUSTOMER_MODEL_MAP
    const model = CUSTOMER_MODEL_MAP[data.model] || CUSTOMER_MODEL_MAP["sd_xl_base_1.0.safetensors"];
    const stream = data.stream || false;
    const userMessage = messages.reverse().find((msg) => msg.role === "user")?.content;
    if (!userMessage) {
      return new Response(JSON.stringify({ error: "未找到用户消息" }), {
        status: 400,
        headers: {
          'Content-Type': 'application/json',
          'Access-Control-Allow-Origin': '*',
          'Access-Control-Allow-Headers': '*'
        }
      });
    }

    const is_translate = extractTranslate(userMessage);
    const originalPrompt = cleanPromptString(userMessage);
    const translatedPrompt = is_translate ? await getPrompt(originalPrompt) : originalPrompt;

    const imageUrl = await generateImageByText(model, translatedPrompt);

    if (stream) {
      return handleStreamResponse(originalPrompt, translatedPrompt, "1024x1024", model, imageUrl);
    } else {
      return handleNonStreamResponse(originalPrompt, translatedPrompt, "1024x1024", model, imageUrl);
    }
  } catch (error) {
    return new Response("Internal Server Error: " + error.message, {
      status: 500,
      headers: {
        'Content-Type': 'text/plain; charset=utf-8',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': '*'
      }
    });
  }
}

/**
 * @description
 *  Translate a prompt into a stable diffusion prompt style.
 * @param {string} prompt - The prompt to translate.
 * @returns {Promise<string>} The translated prompt.
 * @throws {Error} If the translation fails.
 */
async function getPrompt(prompt) {
  const requestBodyJson = {
    messages: [
      {
        role: "system",
        content: `作为 Stable Diffusion Prompt 提示词专家,您将从关键词中创建提示,通常来自 Danbooru 等数据库。

        提示通常描述图像,使用常见词汇,按重要性排列,并用逗号分隔。避免使用"-"或".",但可以接受空格和自然语言。避免词汇重复。

        为了强调关键词,请将其放在括号中以增加其权重。例如,"(flowers)"将'flowers'的权重增加1.1倍,而"(((flowers)))"将其增加1.331倍。使用"(flowers:1.5)"将'flowers'的权重增加1.5倍。只为重要的标签增加权重。

        提示包括三个部分:**前缀**(质量标签+风格词+效果器)+ **主题**(图像的主要焦点)+ **场景**(背景、环境)。

        *   前缀影响图像质量。像"masterpiece"、"best quality"、"4k"这样的标签可以提高图像的细节。像"illustration"、"lensflare"这样的风格词定义图像的风格。像"bestlighting"、"lensflare"、"depthoffield"这样的效果器会影响光照和深度。

        *   主题是图像的主要焦点,如角色或场景。对主题进行详细描述可以确保图像丰富而详细。增加主题的权重以增强其清晰度。对于角色,描述面部、头发、身体、服装、姿势等特征。

        *   场景描述环境。没有场景,图像的背景是平淡的,主题显得过大。某些主题本身包含场景(例如建筑物、风景)。像"花草草地"、"阳光"、"河流"这样的环境词可以丰富场景。你的任务是设计图像生成的提示。请按照以下步骤进行操作:

        1.  我会发送给您一个图像场景。需要你生成详细的图像描述
        2.  图像描述必须是英文,输出为Positive Prompt。

        示例:

        我发送:二战时期的护士。
        您回复只回复:
        A WWII-era nurse in a German uniform, holding a wine bottle and stethoscope, sitting at a table in white attire, with a table in the background, masterpiece, best quality, 4k, illustration style, best lighting, depth of field, detailed character, detailed environment.`
      },
      {
        role: "user",
        content: prompt
      }
    ]
  };

  const response = await postRequest(CF_TRANSLATE_MODEL, requestBodyJson);

  if (!response.ok) {
    return prompt;
  }

  const jsonResponse = await response.json();
  const res = jsonResponse.result.response;
  return res;
}

/**
 * Generate an image from a given text prompt using the Prodia AI API
 * @param {string} model - The name of the AI model to use for image generation
 * @param {string} prompt - The text prompt to generate an image from
 * @returns {string} - The URL of the generated image
 * @throws {Error} - If the image generation fails
 * @see https://docs.prodia.ai/docs/api-reference
 */
async function generateImageByText(model, prompt) {
  // First request to generate the image
  const generateOptions = {
    method: 'POST',
    headers: {
      accept: 'application/json',
      'content-type': 'application/json',
      'X-Prodia-Key': PRODIA_API_KEY
    },
    body: JSON.stringify({
      model: model,
      prompt: prompt,
      negative_prompt: 'low resolution, blurry, distorted features, wrong fingers, extra numbers, watermarks, ugly, distorted, deformed, deformed, repetitive, missing arms and legs, multiple hands and legs, incomplete limbs, long neck, cross-eyed, glazed eyes, lax eyes, squinting, deformed eyes',
      steps: 20,
      cfg_scale: 7,
      seed: -1,
      sampler: 'DPM++ 2M Karras',
      width: 1024,
      height: 1024
    })
  };

  try {
    const generateResponse = await fetch('https://api.prodia.com/v1/sdxl/generate', generateOptions);
    const generateData = await generateResponse.json();

    if (generateData.status !== 'queued') {
      throw new Error('Failed to queue the job');
    }

    const jobId = generateData.job;

    // Polling for the job status
    const statusOptions = {
      method: 'GET',
      headers: {
        accept: 'application/json',
        'X-Prodia-Key': PRODIA_API_KEY
      }
    };

    let statusData;
    while (true) {
      const statusResponse = await fetch(`https://api.prodia.com/v1/job/${jobId}`, statusOptions);
      statusData = await statusResponse.json();

      if (statusData.status === 'succeeded') {
        return statusData.imageUrl;
      } else if (statusData.status === 'failed') {
        throw new Error('Image generation failed');
      }

      // Wait for a short period before checking again
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
  } catch (error) {
    return "图像生成或转换失败,请检查!" + error.message;
  }
}

/**
 * Return a streaming response with the generated image.
 *
 * The response embeds the generated image URL as a markdown image, together
 * with the original and translated prompts, and is sent as a single
 * Server-Sent Events (SSE) chunk in the `data:` field.
 *
 * @param {string} originalPrompt - The original prompt given to the model.
 * @param {string} translatedPrompt - The translated prompt given to the model.
 * @param {string} size - The size of the generated image.
 * @param {string} model - The model used to generate the image.
 * @param {string} imageUrl - The URL of the generated image.
 * @returns {Response} - The response object.
 */
function handleStreamResponse(originalPrompt, translatedPrompt, size, model, imageUrl) {
  const uniqueId = `chatcmpl-${Date.now()}`;
  const createdTimestamp = Math.floor(Date.now() / 1000);
  const systemFingerprint = "fp_" + Math.random().toString(36).slice(2, 11);
  const content = `🎨 原始提示词:${originalPrompt}\n` +
    `🌐 翻译后的提示词:${translatedPrompt}\n` +
    `📐 图像规格:${size}\n` +
    `🌟 图像生成成功!\n` +
    `以下是结果:\n\n` +
    `![生成的图像](${imageUrl})`;

  const responsePayload = {
    id: uniqueId,
    object: "chat.completion.chunk",
    created: createdTimestamp,
    model: model,
    system_fingerprint: systemFingerprint,
    choices: [
      {
        index: 0,
        delta: {
          content: content,
        },
        finish_reason: "stop",
      },
    ],
  };

  const dataString = JSON.stringify(responsePayload);

  return new Response(`data: ${dataString}\n\n`, {
    status: 200,
    headers: {
      "Content-Type": "text/event-stream",
      'Access-Control-Allow-Origin': '*',
      "Access-Control-Allow-Headers": '*',
    },
  });
}

/**
 * Return a non-streaming response with the generated image.
 *
 * The response embeds the generated image URL as a markdown image, together
 * with the original and translated prompts, in an OpenAI-style
 * chat.completion JSON payload.
 *
 * @param {string} originalPrompt - The original prompt given to the model.
 * @param {string} translatedPrompt - The translated prompt given to the model.
 * @param {string} size - The size of the generated image (e.g. 1024x1024).
 * @param {string} model - The model used to generate the image (e.g. sd_xl_base_1.0.safetensors [be9edd61]).
 * @param {string} imageUrl - The URL of the generated image.
 * @return {Response} - The response object with the generated image and prompts.
 */
function handleNonStreamResponse(originalPrompt, translatedPrompt, size, model, imageUrl) {
  const uniqueId = `chatcmpl-${Date.now()}`;
  const createdTimestamp = Math.floor(Date.now() / 1000);
  const systemFingerprint = "fp_" + Math.random().toString(36).slice(2, 11);
  const content = `🎨 原始提示词:${originalPrompt}\n` +
    `🌐 翻译后的提示词:${translatedPrompt}\n` +
    `📐 图像规格:${size}\n` +
    `🌟 图像生成成功!\n` +
    `以下是结果:\n\n` +
    `![生成的图像](${imageUrl})`;

  const response = {
    id: uniqueId,
    object: "chat.completion",
    created: createdTimestamp,
    model: model,
    system_fingerprint: systemFingerprint,
    choices: [{
      index: 0,
      message: {
        role: "assistant",
        content: content
      },
      finish_reason: "stop"
    }],
    usage: {
      prompt_tokens: translatedPrompt.length,
      completion_tokens: content.length,
      total_tokens: translatedPrompt.length + content.length
    }
  };

  const dataString = JSON.stringify(response);

  return new Response(dataString, {
    status: 200,
    headers: {
      'Content-Type': 'application/json',
      'Access-Control-Allow-Origin': '*',
      'Access-Control-Allow-Headers': '*'
    }
  });
}

/**
 * @description
 * POST request to Cloudflare AI API
 * @param {string} model - AI model name
 * @param {object} jsonBody - JSON object to be sent in the body of the request
 * @returns {Promise<Response>} - Response object
 * @throws {Error} - If response status is not OK
 */
async function postRequest(model, jsonBody) {
  const cf_account = CF_ACCOUNT_LIST[Math.floor(Math.random() * CF_ACCOUNT_LIST.length)];
  const apiUrl = `https://api.cloudflare.com/client/v4/accounts/${cf_account.account_id}/ai/run/${model}`;
  const response = await fetch(apiUrl, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${cf_account.token}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify(jsonBody)
  });

  if (!response.ok) {
    throw new Error('Unexpected response ' + response.status);
  }
  return response;
}

/**
 * Extract translate flag from prompt string.
 *
 * This function will parse the flag from the given prompt string and return the
 * translate flag. If the flag is not found, it will return the default translate
 * flag set in CF_IS_TRANSLATE.
 *
 * @param {string} prompt The prompt string to parse the flag from.
 * @return {boolean} The translate flag parsed from the prompt string.
 */
function extractTranslate(prompt) {
  const match = prompt.match(/---n?tl/);
  if (match && match[0]) {
    if (match[0] == "---ntl") {
      return false;
    }
    else if (match[0] == "---tl") {
      return true;
    }
  }
  return CF_IS_TRANSLATE;
}

/**
 * Remove translate flag from prompt string.
 *
 * This function will remove the translate flag ("---ntl" or "---tl") from the
 * given prompt string and return the cleaned prompt string.
 *
 * @param {string} prompt The prompt string to clean.
 * @return {string} The cleaned prompt string.
 */
function cleanPromptString(prompt) {
  return prompt.replace(/---n?tl/, "").trim();
}

addEventListener('fetch', event => {
  event.respondWith(handleRequest(event.request));
});

4. In the Worker you just created, open Settings, add a custom domain under Domains & Routes, and enter your own subdomain, e.g. image.xxxx.com.
(You'll need to look up on your own how to add your own domain to Cloudflare.)

5. Copy your domain and add it as a custom endpoint, together with custom models, in any chat platform or API relay:
(ChatGPT-Next-Web is used as the example here; a direct-call sketch is shown below)
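If you want to verify the deployment without a chat front end, here is a minimal sketch of calling the Worker's OpenAI-compatible routes directly. The base URL image.xxxx.com and the key sk-123456789 are just the placeholders from the steps above; substitute your own custom domain and the API_KEY you set in the Worker.

// Minimal sketch: call the Worker's OpenAI-compatible endpoints directly.
// BASE_URL and API_KEY below are placeholders; use your own domain and key.
const BASE_URL = "https://image.xxxx.com";
const API_KEY = "sk-123456789";

// List the models exposed via CUSTOMER_MODEL_MAP (the same list one-api/new-api fetches).
async function listModels() {
  const res = await fetch(`${BASE_URL}/v1/models`, {
    headers: { Authorization: `Bearer ${API_KEY}` }
  });
  return res.json(); // { data: [{ id, object: "model" }, ...], success: true }
}

// Generate an image; the reply is an OpenAI-style chat completion whose
// content is markdown containing the image URL.
async function draw(prompt, model = "sd_xl_base_1.0.safetensors") {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${API_KEY}`,
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      model,                                          // any key from CUSTOMER_MODEL_MAP
      stream: false,
      messages: [{ role: "user", content: prompt }]   // append ---ntl to skip translation
    })
  });
  const data = await res.json();
  return data.choices[0].message.content;
}

listModels().then(console.log);
draw("二战时期的护士").then(console.log);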

Here is an overview of the models, so you can pick one that fits your needs:

  1. animagineXLV3_v30: suited to creators who want an anime style; likely performs better for animation-style output.
  2. devlishphotorealism_sdxl15: a good choice if you need hyper-realistic images, especially photography-style work.
  3. dreamshaperXL10_alpha2: emphasizes creativity and imagination, suited to strongly artistic pieces.
  4. juggernautXL_v45: may excel at generating large, complex scenes; worth considering.
  5. realismEngineSDXL_v10: focused on realistic rendering, suited to projects that need highly lifelike images.
  6. sd_xl_base_1.0 / sd_xl_base_1.0_inpainting_0.1: the base model and its inpainting variant, suitable for a wide range of uses, with fine adjustments possible when needed.
  7. turbovisionXL_v431: may have an edge in speed and efficiency, suited to scenarios that need fast generation.

On hooking up cloud storage:
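The original post only points at this topic with a screenshot, so the following is just a rough sketch of one possible approach (my assumption, not part of johnson's code): bind a Cloudflare R2 bucket to the Worker and copy the Prodia result into it before responding, so the image keeps working after Prodia's link expires. The binding name IMAGE_BUCKET and the public domain img.xxxx.com are hypothetical.

// Sketch (assumption): persist the generated image to an R2 bucket bound to
// this Worker as IMAGE_BUCKET, and return a URL on your own bucket domain.
async function saveToR2(imageUrl) {
  const resp = await fetch(imageUrl);              // download the Prodia result
  if (!resp.ok) return imageUrl;                   // fall back to the original URL
  const key = `sd/${Date.now()}.png`;
  const body = await resp.arrayBuffer();
  await IMAGE_BUCKET.put(key, body, {              // IMAGE_BUCKET: hypothetical R2 binding
    httpMetadata: { contentType: "image/png" }
  });
  return `https://img.xxxx.com/${key}`;            // public domain attached to the bucket
}

You would call saveToR2(imageUrl) right after generateImageByText in handleRequest and pass the returned URL to the response helpers instead.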


Adding multiple keys for unlimited free usage:
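The screenshot that explained this step is not included here, so the following is only a sketch of what key rotation can look like in this code, by analogy with how CF_ACCOUNT_LIST is already picked at random: turn the single PRODIA_API_KEY constant into a list and choose one per request. The list form and the pickProdiaKey helper are my assumptions.

// Sketch (assumption): rotate several Prodia keys the same way the code
// already rotates CF_ACCOUNT_LIST. Use pickProdiaKey() wherever PRODIA_API_KEY
// was used, i.e. the two X-Prodia-Key headers inside generateImageByText.
const PRODIA_API_KEY_LIST = ["prodia-key-1", "prodia-key-2"];

function pickProdiaKey() {
  return PRODIA_API_KEY_LIST[Math.floor(Math.random() * PRODIA_API_KEY_LIST.length)];
}

Each key would come from a separate Prodia account, which is what makes the rotation worthwhile.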


Latest optimizations:

398 Likes

Impressive, marking this.

6 Likes

Marked, I'll integrate it when I have time. What I'm using now is still enough for the moment, though the output quality of all of these feels mediocre. MJ is still the most impressive; it's just priced too high.

12 Likes

mark mark

4 Likes

Nicely written, have a like :+1:t2:

7 Likes

Uh, is there a complete beginner tutorial? :sweat_smile:
Like step 1, step 2... reading several posts together, I'm totally lost.

5 Likes

Didn't I already label the steps 1, 2, 3? I'll rewrite it anyway.

2 Likes

Impressive, I'll try it sometime.

6 Likes

I've revised it, take another look.

2 Likes

Awesome, I'll try it when I have time.

8 Likes

Legend :star_struck: love it.

6 Likes

I remember there was a UI that deploys directly on CF; I'll look it up later. A one-step setup feels nicer.

5 Likes

I'll play with it over the holidays; 1,000 generations is pretty nice.

9 Likes

So strong! Legend! Going to try it right now.

4 Likes

Got an error: Internal Server Error: Unexpected response 400

10 Likes

Impressive, I'll study this.

24 Likes

Solved it.
The account_id can be seen at the bottom right of any of your domains' overview pages.
For the token, just create one using the Workers AI template.

5 Likes

Thanks for sharing, you're awesome.

4 Likes

Thanks for sharing :bili_006:

6 Likes

Huh? What else lets you use SD and Flux for free?

4 Likes