[One-Click Deployment Tutorial] Fal Image API to OpenAI Format, Compatible with OpenWebUI / NextChat / Cherry and More

First of all, thanks to the forum veterans whose shared material this builds on.

Tutorial overview

  • Converts the fal image-generation API to the OpenAI /v1/chat/completions format
  • Compatible with OpenWebUI, NextChat, Chatbox, Cherry, and similar clients
  • Deploys on Cloudflare Workers
  • Lets you define a custom access key
  • If you have several friends, you can "borrow" their fal keys too and drop them into the key pool; the worker picks one at random per request (see the sketch below)
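
Before deploying, the only edits you need are two constants near the top of the worker: the pool of real fal keys and the custom key your clients will send. A minimal sketch with placeholder values (the full code below contains the same constants):

const AI_KEYS = ["fal-key-1", "fal-key-2"]; // real fal keys; one is chosen at random per request
const CUSTOM_ACCESS_KEY = "my-proxy-key";   // the value clients pass as "Authorization: Bearer <key>"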

The code is as follows

export default {
  async fetch(request, env, ctx) {
    const url = new URL(request.url);
    const path = url.pathname;
    // Handle different endpoints
    if (path === '/v1/chat/completions' && request.method === 'POST') {
      return await handleChatCompletions(request, env, ctx); // Pass ctx here
    } else if (path === '/v1/images/generations' && request.method === 'POST') {
      return await handleImageGenerations(request, env, ctx); // Pass ctx here
    } else if (path === '/v1/models' && request.method === 'GET') {
      return await listModels();
    } else {
      return new Response(JSON.stringify({
        error: {
          message: "Not Found",
          type: "not_found_error"
        }
      }), {
        status: 404,
        headers: { 'Content-Type': 'application/json' }
      });
    }
  }
};
// Define your array of AI keys that will be used for actual API calls
const AI_KEYS = [
  "你的FAL的key",
  // Add more keys as needed
];
// Define your custom access key that users will use
const CUSTOM_ACCESS_KEY = "your-custom-access-key";
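// Tip: hardcoding keys is fine for a quick test deploy, but Workers secrets are
// safer, e.g. `wrangler secret put FAL_KEYS` (hypothetical binding name) and then
// reading the value from `env` inside fetch() instead of from these constants.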
// Model URLs configuration
const MODEL_URLS = {
  "FLUX-pro": {
    "submit_url": "https://queue.fal.run/fal-ai/flux-pro/v1.1-ultra",
    "status_base_url": "https://queue.fal.run/fal-ai/flux-pro"
  },
  "recraft-v3": {
    "submit_url": "https://queue.fal.run/fal-ai/recraft-v3",
    "status_base_url": "https://queue.fal.run/fal-ai/recraft-v3"
  },
  "FLUX-1.1-pro": {
    "submit_url": "https://queue.fal.run/fal-ai/flux-pro/v1.1",
    "status_base_url": "https://queue.fal.run/fal-ai/flux-pro"
  },
  "ideogram": {
    "submit_url": "https://queue.fal.run/fal-ai/ideogram/v2",
    "status_base_url": "https://queue.fal.run/fal-ai/ideogram"
  },
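  // Note: "dall-e-3" is mapped to FLUX dev below, so clients that default to
  // dall-e-3 still resolve to a working fal model.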
  "dall-e-3": {
    "submit_url": "https://queue.fal.run/fal-ai/flux/dev",
    "status_base_url": "https://queue.fal.run/fal-ai/flux"
  }
};
/**
* Get a random key from the AI_KEYS array
*/
function getRandomApiKey() {
  const randomIndex = Math.floor(Math.random() * AI_KEYS.length);
  return AI_KEYS[randomIndex];
}
/**
* Extract API key from Authorization header and validate it
*/
function extractAndValidateApiKey(request) {
  const authHeader = request.headers.get('Authorization') || '';
  let userKey;
  if (authHeader.startsWith('Bearer ')) {
    userKey = authHeader.substring(7);
  } else if (authHeader.startsWith('Key ')) {
    userKey = authHeader.substring(4);
  } else {
    userKey = authHeader;
  }
  // Validate the user's key against our custom access key
  if (userKey !== CUSTOM_ACCESS_KEY) {
    return { valid: false, userKey };
  }
  // Return a randomly selected AI key for actual API calls
  const randomApiKey = getRandomApiKey();
  console.log(`Selected random API key: ${randomApiKey.substring(0, 3)}...${randomApiKey.length > 6 ? randomApiKey.substring(randomApiKey.length - 3) : ''}`);
  return { valid: true, userKey, apiKey: randomApiKey };
}
/**
* Creates a streaming response by chunk encoding individual messages
*/
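// Note: this helper appears to be unused (the streaming path inside
// handleChatCompletions builds its own TransformStream); it is kept here as a
// reference for the SSE chunk format.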
function createStreamingResponse(requestId, model, imageUrls, prompt) {
  // Create a readable stream to return SSE data
  const stream = new ReadableStream({
    start(controller) {
      // First, send the intro message
      const introMessage = {
        id: `chatcmpl-${requestId}`,
        object: "chat.completion.chunk",
        created: Math.floor(Date.now() / 1000),
        model: model,
        choices: [
          {
            index: 0,
            delta: {
              role: "assistant"
            },
            finish_reason: null
          }
        ]
      };
      controller.enqueue(`data: ${JSON.stringify(introMessage)}\n\n`);
      // Function to send content chunks
      const sendContentChunk = (content) => {
        const chunk = {
          id: `chatcmpl-${requestId}`,
          object: "chat.completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: model,
          choices: [
            {
              index: 0,
              delta: {
                content: content
              },
              finish_reason: null
            }
          ]
        };
        controller.enqueue(`data: ${JSON.stringify(chunk)}\n\n`);
      };
      // Split message into logical chunks
      const baseText = `Image generated successfully; please be patient while it loads!`;
      // Send base text first
      sendContentChunk(baseText + "\n\n");
      // Send each image URL
      for (let i = 0; i < imageUrls.length; i++) {
        const imageText = `![Generated Image ${i+1}](${imageUrls[i]})`;
        if (i > 0) {
          sendContentChunk("\n\n");
        }
        sendContentChunk(imageText);
      }
      // Send the final message with finish_reason
      const finalMessage = {
        id: `chatcmpl-${requestId}`,
        object: "chat.completion.chunk",
        created: Math.floor(Date.now() / 1000),
        model: model,
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: "stop"
          }
        ]
      };
      controller.enqueue(`data: ${JSON.stringify(finalMessage)}\n\n`);
      // Send the [DONE] message to indicate the stream is complete
      controller.enqueue(`data: [DONE]\n\n`);
      controller.close();
    }
  });
  return new Response(stream, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive'
    }
  });
}
/**
* Handle chat completions endpoint
*/
async function handleChatCompletions(request, env, ctx) { // Receive ctx here
  const { valid, userKey, apiKey } = extractAndValidateApiKey(request);
  if (!valid) {
    console.log("Invalid API key provided");
    return new Response(JSON.stringify({
      error: {
        message: "Invalid API key. Please provide the correct key in the Authorization header.",
        type: "authentication_error"
      }
    }), {
      status: 401,
      headers: { 'Content-Type': 'application/json' }
    });
  }
  let openaiRequest;
  try {
    openaiRequest = await request.json();
  } catch (error) {
    return new Response(JSON.stringify({
      error: {
        message: "Missing or invalid request body",
        type: "invalid_request_error"
      }
    }), {
      status: 400,
      headers: { 'Content-Type': 'application/json' }
    });
  }
  const messages = openaiRequest.messages || [];
  const model = openaiRequest.model || 'dall-e-3';  // Default
  const stream = openaiRequest.stream === true;  // Check for streaming request
  console.log(`Stream mode: ${stream ? 'enabled' : 'disabled'}`);
  // Extract prompt from the last user message
  let prompt = "";
  for (let i = messages.length - 1; i >= 0; i--) {
    if (messages[i].role === 'user') {
      prompt = messages[i].content;
      break;
    }
  }
  if (!prompt) {
    const defaultMessage = "I can generate images. Describe what you'd like.";
    if (stream) {
      // Return a streaming response with a default message
      return createStreamingDefaultResponse(model, defaultMessage);
    }
    const completionsResponse = {
      id: `chatcmpl-${Date.now()}`,
      object: "chat.completion",
      created: Math.floor(Date.now() / 1000),
      model: model,
      choices: [
        {
          index: 0,
          message: {
            role: "assistant",
            content: defaultMessage
          },
          finish_reason: "stop"
        }
      ],
      usage: {
        prompt_tokens: Math.floor(JSON.stringify(messages).length / 4),
        completion_tokens: 20,
        total_tokens: Math.floor(JSON.stringify(messages).length / 4) + 20
      }
    };
    return new Response(JSON.stringify(completionsResponse), {
      headers: { 'Content-Type': 'application/json' }
    });
  }
  console.log(`Extracted image prompt: ${prompt}`);
  const n = 1;
  const falRequest = { prompt: prompt, num_images: n };
  console.log("Making request to Fal API...");
  // Get the correct URLs based on the model
  const modelUrls = MODEL_URLS[model] || MODEL_URLS["dall-e-3"];
  const falSubmitUrl = modelUrls.submit_url;
  const falStatusBaseUrl = modelUrls.status_base_url;
  console.log(`Using model: ${model}, Submit URL: ${falSubmitUrl}, Status Base URL: ${falStatusBaseUrl}`);
  try {
    // Use the randomly selected API key from our pool
    const headers = {
      "Authorization": `Key ${apiKey}`,
      "Content-Type": "application/json"
    };
    console.log(`Using random API key: ${apiKey.substring(0, 3)}...${apiKey.length > 6 ? apiKey.substring(apiKey.length - 3) : ''}`);
    console.log(`Request URL: ${falSubmitUrl}`);
    console.log(`Payload: ${JSON.stringify(falRequest)}`);
    // Submit request to Fal API
    const falResponse = await fetch(falSubmitUrl, {
      method: 'POST',
      headers: headers,
      body: JSON.stringify(falRequest)
    });
    console.log(`Fal API response status: ${falResponse.status}`);
    const responseText = await falResponse.text();
    console.log(`Fal API response: ${responseText.substring(0, 200)}...`);
    if (falResponse.status !== 200) {
      let errorMessage = responseText;
      try {
        const errorData = JSON.parse(responseText);
        errorMessage = errorData.error?.message || responseText;
      } catch (e) {
        // Keep original errorMessage if JSON parsing fails
      }
      console.log(`Fal API error: ${falResponse.status}, ${errorMessage}`);
      if (falResponse.status === 401 || falResponse.status === 403) {
        return new Response(JSON.stringify({
          error: {
            message: `Authentication error with Fal API: ${errorMessage}`,
            type: "invalid_api_key",
            code: falResponse.status
          }
        }), {
          status: 401,
          headers: { 'Content-Type': 'application/json' }
        });
      }
      return new Response(JSON.stringify({
        error: {
          message: `Fal API error: ${errorMessage}`,
          type: "fal_api_error",
          code: falResponse.status
        }
      }), {
        status: 500,
        headers: { 'Content-Type': 'application/json' }
      });
    }
    const falData = JSON.parse(responseText);
    const requestId = falData.request_id;
    if (!requestId) {
      console.log("No request_id found in Fal response.");
      return new Response(JSON.stringify({
        error: {
          message: "Missing request_id",
          type: "fal_api_error"
        }
      }), {
        status: 500,
        headers: { 'Content-Type': 'application/json' }
      });
    }
    console.log(`Got request_id: ${requestId}`);
    const imageUrls = [];
    const maxAttempts = 30; // Reduce polling attempts for Workers
    // If streaming, we need to start the response early with initial chunks
    if (stream) {
      // For streaming, we'll use a TransformStream which allows us to start returning
      // data to the client immediately while continuing our polling process
      const { readable, writable } = new TransformStream();
      const encoder = new TextEncoder();
      const writer = writable.getWriter();
      // Start responding with initial chunks
      const introMessage = {
        id: `chatcmpl-${requestId}`,
        object: "chat.completion.chunk",
        created: Math.floor(Date.now() / 1000),
        model: model,
        choices: [
          {
            index: 0,
            delta: {
              role: "assistant"
            },
            finish_reason: null
          }
        ]
      };
      writer.write(encoder.encode(`data: ${JSON.stringify(introMessage)}\n\n`));
      // Start the polling in the background without blocking response
      ctx.waitUntil((async () => { // Now ctx is available
        let attempt = 0;
        let imageGenerated = false;
        let generationFailed = false; // set when Fal reports FAILED, so the timeout message isn't also sent
        while (attempt < maxAttempts && !imageGenerated) {
          try {
            const statusUrl = `${falStatusBaseUrl}/requests/${requestId}/status`;
            const resultUrl = `${falStatusBaseUrl}/requests/${requestId}`;
            const statusResponse = await fetch(statusUrl, {
              headers: { "Authorization": `Key ${apiKey}` }
            });
            if (statusResponse.status === 200) {
              const statusData = await statusResponse.json();
              const status = statusData.status;
              if (status === "FAILED") {
                const errorChunk = {
                  id: `chatcmpl-${requestId}`,
                  object: "chat.completion.chunk",
                  created: Math.floor(Date.now() / 1000),
                  model: model,
                  choices: [
                    {
                      index: 0,
                      delta: { content: "Unable to generate an image. Try a different description." },
                      finish_reason: null
                    }
                  ]
                };
                writer.write(encoder.encode(`data: ${JSON.stringify(errorChunk)}\n\n`));
                generationFailed = true;
                break;
              }
              if (status === "COMPLETED") {
                const resultResponse = await fetch(resultUrl, {
                  headers: { "Authorization": `Key ${apiKey}` }
                });
                if (resultResponse.status === 200) {
                  const resultData = await resultResponse.json();
                  if ("images" in resultData) {
                    const images = resultData.images || [];
                    for (const img of images) {
                      if (img && typeof img === 'object' && "url" in img) {
                        imageUrls.push(img.url);
                      }
                    }
                  }
                  if (imageUrls.length > 0) {
                    imageGenerated = true;
                    // Send content chunks
                    writer.write(encoder.encode(`data: ${JSON.stringify({
                      id: `chatcmpl-${requestId}`,
                      object: "chat.completion.chunk",
                      created: Math.floor(Date.now() / 1000),
                      model: model,
                      choices: [{ index: 0, delta: { content: `Image generated successfully\n\n` }, finish_reason: null }]
                    })}\n\n`));
                    // Send each image URL as a separate chunk
                    for (let i = 0; i < imageUrls.length; i++) {
                      if (i > 0) {
                        writer.write(encoder.encode(`data: ${JSON.stringify({
                          id: `chatcmpl-${requestId}`,
                          object: "chat.completion.chunk",
                          created: Math.floor(Date.now() / 1000),
                          model: model,
                          choices: [{ index: 0, delta: { content: "\n\n" }, finish_reason: null }]
                        })}\n\n`));
                      }
                      writer.write(encoder.encode(`data: ${JSON.stringify({
                        id: `chatcmpl-${requestId}`,
                        object: "chat.completion.chunk",
                        created: Math.floor(Date.now() / 1000),
                        model: model,
                        choices: [{ index: 0, delta: { content: `![Generated Image ${i+1}](${imageUrls[i]})` }, finish_reason: null }]
                      })}\n\n`));
                    }
                  }
                }
              }
            }
          } catch (e) {
            console.log(`Error during polling: ${e.toString()}`);
          }
          if (!imageGenerated) {
            await new Promise(resolve => setTimeout(resolve, 2000));
            attempt++;
          }
        }
        if (!imageGenerated && !generationFailed) {
          writer.write(encoder.encode(`data: ${JSON.stringify({
            id: `chatcmpl-${requestId}`,
            object: "chat.completion.chunk",
            created: Math.floor(Date.now() / 1000),
            model: model,
            choices: [{ index: 0, delta: { content: "Unable to generate an image in time. Please try again with a different description." }, finish_reason: null }]
          })}\n\n`));
        }
        // Send final messages
        writer.write(encoder.encode(`data: ${JSON.stringify({
          id: `chatcmpl-${requestId}`,
          object: "chat.completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: model,
          choices: [{ index: 0, delta: {}, finish_reason: "stop" }]
        })}\n\n`));
        writer.write(encoder.encode("data: [DONE]\n\n"));
        writer.close();
      })());
      return new Response(readable, {
        headers: {
          'Content-Type': 'text/event-stream',
          'Cache-Control': 'no-cache',
          'Connection': 'keep-alive'
        }
      });
    }
    // For non-streaming requests, implement polling as before
    let attempt = 0;
    while (attempt < maxAttempts) {
      console.log(`Polling attempt ${attempt+1}/${maxAttempts}`);
      try {
        // Construct the correct status and result URLs
        const statusUrl = `${falStatusBaseUrl}/requests/${requestId}/status`;
        const resultUrl = `${falStatusBaseUrl}/requests/${requestId}`;
        console.log(`Checking status URL: ${statusUrl}`);
        const statusHeaders = {
          "Authorization": `Key ${apiKey}`,
          "Content-Type": "application/json"
        };
        const statusResponse = await fetch(statusUrl, {
          headers: statusHeaders
        });
        console.log(`Status response code: ${statusResponse.status}`);
        if (statusResponse.status === 200) {
          const statusData = await statusResponse.json();
          const status = statusData.status;
          console.log(`Current status: ${status}`);
          if (status === "FAILED") {
            console.log("Generation failed!");
            return new Response(JSON.stringify({
              error: {
                message: "Image generation failed",
                type: "generation_failed"
              }
            }), {
              status: 500,
              headers: { 'Content-Type': 'application/json' }
            });
          }
          if (status === "COMPLETED") {
            console.log(`Fetching result from: ${resultUrl}`);
            const resultResponse = await fetch(resultUrl, {
              headers: {
                "Authorization": `Key ${apiKey}`
              }
            });
            console.log(`Result fetch status: ${resultResponse.status}`);
            if (resultResponse.status === 200) {
              const resultData = await resultResponse.json();
              console.log(`Result data preview: ${JSON.stringify(resultData).substring(0, 200)}...`);
              if ("images" in resultData) {
                const images = resultData.images || [];
                for (const img of images) {
                  if (img && typeof img === 'object' && "url" in img) {
                    imageUrls.push(img.url);
                    console.log(`Found image URL: ${img.url}`);
                  }
                }
              }
              if (imageUrls.length > 0) {
                break;
              } else {
                console.log("Completed, no images found.");
              }
            }
          }
        } else {
          const statusText = await statusResponse.text();
          console.log(`Error checking status: ${statusText}`);
        }
      } catch (e) {
        console.log(`Error during polling: ${e.toString()}`);
      }
      // Wait before next poll - using setTimeout with await
      await new Promise(resolve => setTimeout(resolve, 2000));
      attempt++;
    }
    if (imageUrls.length === 0) {
      console.log("No images found after polling.");
      const completionsResponse = {
        id: `chatcmpl-${requestId}`,
        object: "chat.completion",
        created: Math.floor(Date.now() / 1000),
        model: model,
        choices: [
          {
            index: 0,
            message: {
              role: "assistant",
              content: "Unable to generate an image. Try a different description."
            },
            finish_reason: "stop"
          }
        ],
        usage: {
          prompt_tokens: Math.floor(prompt.length / 4),
          completion_tokens: 30,
          total_tokens: Math.floor(prompt.length / 4) + 30
        }
      };
      return new Response(JSON.stringify(completionsResponse), {
        headers: { 'Content-Type': 'application/json' }
      });
    }
    let content = `Image generated successfully\n\n`;
    for (let i = 0; i < imageUrls.length; i++) {
      if (i > 0) {
        content += "\n\n";
      }
      content += `![Generated Image ${i+1}](${imageUrls[i]})`;
    }
    const completionsResponse = {
      id: `chatcmpl-${requestId}`,
      object: "chat.completion",
      created: Math.floor(Date.now() / 1000),
      model: model,
      choices: [
        {
          index: 0,
          message: {
            role: "assistant",
            content: content
          },
          finish_reason: "stop"
        }
      ],
      usage: {
        prompt_tokens: Math.floor(prompt.length / 4),
        completion_tokens: Math.floor(content.length / 4),
        total_tokens: Math.floor(prompt.length / 4) + Math.floor(content.length / 4)
      }
    };
    console.log("Returning OpenAI completions-style response");
    return new Response(JSON.stringify(completionsResponse), {
      headers: { 'Content-Type': 'application/json' }
    });
  } catch (e) {
    console.log(`Exception: ${e.toString()}`);
    return new Response(JSON.stringify({
      error: {
        message: `Server error: ${e.toString()}`,
        type: "server_error"
      }
    }), {
      status: 500,
      headers: { 'Content-Type': 'application/json' }
    });
  }
}
/**
* Create a streaming response with a default message
*/
function createStreamingDefaultResponse(model, message) {
  const requestId = Date.now().toString();
  const stream = new ReadableStream({
    start(controller) {
      // Send role
      controller.enqueue(`data: ${JSON.stringify({
        id: `chatcmpl-${requestId}`,
        object: "chat.completion.chunk",
        created: Math.floor(Date.now() / 1000),
        model: model,
        choices: [{
          index: 0,
          delta: { role: "assistant" },
          finish_reason: null
        }]
      })}\n\n`);
      // Send content
      controller.enqueue(`data: ${JSON.stringify({
        id: `chatcmpl-${requestId}`,
        object: "chat.completion.chunk",
        created: Math.floor(Date.now() / 1000),
        model: model,
        choices: [{
          index: 0,
          delta: { content: message },
          finish_reason: null
        }]
      })}\n\n`);
      // Send finish
      controller.enqueue(`data: ${JSON.stringify({
        id: `chatcmpl-${requestId}`,
        object: "chat.completion.chunk",
        created: Math.floor(Date.now() / 1000),
        model: model,
        choices: [{
          index: 0,
          delta: {},
          finish_reason: "stop"
        }]
      })}\n\n`);
      // Send done
      controller.enqueue(`data: [DONE]\n\n`);
      controller.close();
    }
  });
  return new Response(stream, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive'
    }
  });
}
/**
* Handle image generations endpoint
*/
async function handleImageGenerations(request, env, ctx) { // Receive ctx here
  const { valid, userKey, apiKey } = extractAndValidateApiKey(request);
  if (!valid) {
    return new Response(JSON.stringify({
      error: {
        message: "Invalid API key.",
        type: "authentication_error"
      }
    }), {
      status: 401,
      headers: { 'Content-Type': 'application/json' }
    });
  }
  let openaiRequest;
  try {
    openaiRequest = await request.json();
  } catch (error) {
    return new Response(JSON.stringify({
      error: {
        message: "Missing or invalid request body",
        type: "invalid_request_error"
      }
    }), {
      status: 400,
      headers: { 'Content-Type': 'application/json' }
    });
  }
  const prompt = openaiRequest.prompt || '';
  const n = openaiRequest.n || 1;
  const model = openaiRequest.model || 'dall-e-3';
  const stream = openaiRequest.stream === true;
  // Convert to chat request format and preserve our auth validation results
  const clonedHeaders = new Headers(request.headers);
  if (!clonedHeaders.has('Authorization')) {
    clonedHeaders.set('Authorization', `Key ${userKey}`);
  }
  const modifiedRequest = new Request(request.url, {
    method: 'POST',
    headers: clonedHeaders,
    body: JSON.stringify({
      model: model,
      messages: [{ role: "user", content: prompt }],
      stream: stream
    })
  });
  return handleChatCompletions(modifiedRequest, env, ctx); // Pass ctx
}
/**
* List available models
*/
async function listModels() {
  // Advertise the same IDs that MODEL_URLS can route; any other name falls back
  // to the "dall-e-3" (FLUX dev) mapping in handleChatCompletions.
  const models = [
    { id: "dall-e-3", object: "model", created: 1698785189, owned_by: "fal-openai-adapter", permission: [], root: "dall-e-3", parent: null },
    { id: "FLUX-pro", object: "model", created: 1698785189, owned_by: "fal-openai-adapter", permission: [], root: "FLUX-pro", parent: null },
    { id: "FLUX-1.1-pro", object: "model", created: 1698785189, owned_by: "fal-openai-adapter", permission: [], root: "FLUX-1.1-pro", parent: null },
    { id: "recraft-v3", object: "model", created: 1698785189, owned_by: "fal-openai-adapter", permission: [], root: "recraft-v3", parent: null },
    { id: "ideogram", object: "model", created: 1698785189, owned_by: "fal-openai-adapter", permission: [], root: "ideogram", parent: null }
  ];
  return new Response(JSON.stringify({
    object: "list",
    data: models
  }), {
    headers: { 'Content-Type': 'application/json' }
  });
}

How to call it

curl --location 'https://your-worker-domain/v1/chat/completions' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer your-custom-key' \
--data '{
     "model": "FLUX-pro",
     "messages": [{"role": "user", "content": "your prompt here"}],
     "stream": true
   }'
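
A couple of extra calls worth knowing. Listing models (note that, as written, this route performs no key check):

curl 'https://your-worker-domain/v1/models'

The /v1/images/generations route also works; it is converted into a chat request internally:

curl --location 'https://your-worker-domain/v1/images/generations' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer your-custom-key' \
--data '{"model": "FLUX-pro", "prompt": "your prompt here"}'

And because the worker speaks the OpenAI chat format, OpenAI-compatible SDKs should work if you point their base URL at it. A sketch using the official openai Node package (untested against this worker):

import OpenAI from "openai";

const client = new OpenAI({
  baseURL: "https://your-worker-domain/v1", // your Worker URL, not api.openai.com
  apiKey: "your-custom-key"                 // must equal CUSTOM_ACCESS_KEY
});

const resp = await client.chat.completions.create({
  model: "FLUX-pro",
  messages: [{ role: "user", content: "a watercolor fox" }]
});
console.log(resp.choices[0].message.content); // markdown image link(s)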

Thanks, boss!

Thanks!

Here to learn.

Handy to use :+1:

Feels like $50 of credit gets burned through pretty fast.

LobeChat doesn't seem to work, but Cherry does; maybe I'm doing something wrong.

Putting it to use, burning credits even faster.

Works well. Does it support other models?

This provider is too pricey, so I save it for Pro and Ideogram work tasks. Alternatives like SiliconFlow and Nebius keep springing up like mushrooms after rain.

The code already wires up several models. I haven't dug into their official site for details; if you find a model that works well, let me know. (A sketch of adding another model follows below.)
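
For reference, adding a model is just one more MODEL_URLS entry plus the matching ID in listModels(). A minimal sketch with a hypothetical model ID; the URLs are placeholders, so look up the real queue paths in fal.ai's model docs:

"my-new-model": {
  // Placeholders: substitute the real fal.ai queue endpoints for the model
  "submit_url": "https://queue.fal.run/fal-ai/<model-path>",
  "status_base_url": "https://queue.fal.run/fal-ai/<model-root>"
}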

Makes sense, learned something new.

Thanks for sharing!


This topic was automatically closed 30 days after the last reply. New replies are no longer allowed.