Has Vercel become the hardest site on the web to reverse-engineer?

Don't free accounts have a usage limit?

Can this be turned into an API (2api) and used in NextChat?

Damn, this thing (4o) really is fast.

What's the URL for this?

This one.

Wasn't a reverse-engineering of this posted before? :thinking:

How do you use it?

Watching this thread; I don't know how to use it either.

Can some expert show us how to use it?

So fast.

Ridiculously fast.

So fast, so satisfying. Too bad it's only a trial version that stays inside the chat box. 始皇, come take a crack at this.

I went and bought Pro. I'll DM you, come try it out.

Tried it; there does seem to be a usage limit, or maybe my IP just isn't clean enough.

Has anyone reverse-engineered it yet? Can a Pro subscription use Opus?

Impressive, bookmarking this.

So fast :hear_no_evil:

Tried to have a go at it myself, but got stuck at the anti-bot check and couldn't get past it.
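
Here's how far I got: a Node/Express proxy that mimics the playground. It pulls the anti-bot challenge from https://sdk.vercel.ai/openai.jpeg, solves it in a vm sandbox, and forwards chat requests to /api/generate. Posting the code in case someone can get further than I did.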

const express = require('express');
const bodyParser = require('body-parser');
const uuid = require('uuid');
const randomUseragent = require('random-useragent');
const axios = require('axios');
const vm = require('vm');

const app = express();
app.use(bodyParser.json());

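// Model catalogue mirrored from the sdk.vercel.ai playground: each entry maps a
// short name to the playground's internal model id and its default sampling parameters.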
const modelInfo = {
    'replicate/llama70b-v2-chat': {
        id: 'replicate:replicate/llama-2-70b-chat',
        default_params: {
            temperature: 0.75,
            maximumLength: 3000,
            topP: 1,
            repetitionPenalty: 1,
        },
    },
    'a16z-infra/llama7b-v2-chat': {
        id: 'replicate:a16z-infra/llama7b-v2-chat',
        default_params: {
            temperature: 0.75,
            maximumLength: 3000,
            topP: 1,
            repetitionPenalty: 1,
        },
    },
    'a16z-infra/llama13b-v2-chat': {
        id: 'replicate:a16z-infra/llama13b-v2-chat',
        default_params: {
            temperature: 0.75,
            maximumLength: 3000,
            topP: 1,
            repetitionPenalty: 1,
        },
    },
    'replicate/llama-2-70b-chat': {
        id: 'replicate:replicate/llama-2-70b-chat',
        default_params: {
            temperature: 0.75,
            maximumLength: 3000,
            topP: 1,
            repetitionPenalty: 1,
        },
    },
    'bigscience/bloom': {
        id: 'huggingface:bigscience/bloom',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 0.95,
            topK: 4,
            repetitionPenalty: 1.03,
        },
    },
    'google/flan-t5-xxl': {
        id: 'huggingface:google/flan-t5-xxl',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 0.95,
            topK: 4,
            repetitionPenalty: 1.03,
        },
    },
    'EleutherAI/gpt-neox-20b': {
        id: 'huggingface:EleutherAI/gpt-neox-20b',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 0.95,
            topK: 4,
            repetitionPenalty: 1.03,
            stopSequences: [],
        },
    },
    'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
        id: 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
        default_params: {
            maximumLength: 1024,
            typicalP: 0.2,
            repetitionPenalty: 1,
        },
    },
    'OpenAssistant/oasst-sft-1-pythia-12b': {
        id: 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
        default_params: {
            maximumLength: 1024,
            typicalP: 0.2,
            repetitionPenalty: 1,
        },
    },
    'bigcode/santacoder': {
        id: 'huggingface:bigcode/santacoder',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 0.95,
            topK: 4,
            repetitionPenalty: 1.03,
        },
    },
    'command-light-nightly': {
        id: 'cohere:command-light-nightly',
        default_params: {
            temperature: 0.9,
            maximumLength: 1024,
            topP: 1,
            topK: 0,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
    'command-nightly': {
        id: 'cohere:command-nightly',
        default_params: {
            temperature: 0.9,
            maximumLength: 1024,
            topP: 1,
            topK: 0,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
    'code-davinci-002': {
        id: 'openai:code-davinci-002',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 1,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
    'gpt-3.5-turbo': {
        id: 'openai:gpt-3.5-turbo',
        default_params: {
            temperature: 0.7,
            maximumLength: 4096,
            topP: 1,
            topK: 1,
            presencePenalty: 1,
            frequencyPenalty: 1,
            stopSequences: [],
        },
    },
    'gpt-3.5-turbo-16k': {
        id: 'openai:gpt-3.5-turbo-16k',
        default_params: {
            temperature: 0.7,
            maximumLength: 16280,
            topP: 1,
            topK: 1,
            presencePenalty: 1,
            frequencyPenalty: 1,
            stopSequences: [],
        },
    },
    'gpt-3.5-turbo-16k-0613': {
        id: 'openai:gpt-3.5-turbo-16k-0613',
        default_params: {
            temperature: 0.7,
            maximumLength: 16280,
            topP: 1,
            topK: 1,
            presencePenalty: 1,
            frequencyPenalty: 1,
            stopSequences: [],
        },
    },
    'text-ada-001': {
        id: 'openai:text-ada-001',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 1,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
    'text-babbage-001': {
        id: 'openai:text-babbage-001',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 1,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
    'text-curie-001': {
        id: 'openai:text-curie-001',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 1,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
    'text-davinci-002': {
        id: 'openai:text-davinci-002',
        default_params: {
            temperature: 0.5,
            maximumLength: 1024,
            topP: 1,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
    'text-davinci-003': {
        id: 'openai:text-davinci-003',
        default_params: {
            temperature: 0.5,
            maximumLength: 4097,
            topP: 1,
            presencePenalty: 0,
            frequencyPenalty: 0,
            stopSequences: [],
        },
    },
};

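// The playground gates /api/generate behind a "custom-encoding" header. A GET to
// /openai.jpeg returns base64-encoded JSON { c, a, t }: c is the JavaScript source of a
// challenge function, a is its argument, and t is a server token. The client must
// evaluate c(a) and send back base64 over the UTF-16LE bytes of JSON({ r: result, t }).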
async function getAntiBotToken() {
    const headers = {
        authority: 'sdk.vercel.ai',
        accept: '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        pragma: 'no-cache',
        referer: 'https://sdk.vercel.ai/',
        'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'user-agent': randomUseragent.getRandom(),
    };

    try {
        const response = await axios.get('https://sdk.vercel.ai/openai.jpeg', { headers, responseType: 'text' });
        let rawData;

        try {
            rawData = JSON.parse(Buffer.from(response.data, 'base64').toString('utf-8'));
        } catch (error) {
            throw new Error("Failed to parse response as base64-encoded JSON.");
        }

        // Evaluate the challenge function c(a) in an isolated VM context, stubbing the
        // browser globals the challenge script expects.
        const scriptCode = `(function() {
            const globalThis = { marker: "mark" };
            String.prototype.fontcolor = function() { return \`<font>\${this}</font>\`; };
            return (${rawData.c})(${rawData.a});
        })();`;

        const script = new vm.Script(scriptCode);
        const context = vm.createContext();
        const result = script.runInContext(context);

        // The server expects base64 over the UTF-16LE bytes of { r, t }.
        const rawToken = JSON.stringify({ r: result, t: rawData.t });
        return Buffer.from(rawToken, 'utf-16le').toString('base64');
    } catch (error) {
        console.error("Failed to get anti-bot token:", error.message);
        throw error;
    }
}


class Vercel {
    static async *createCompletion(model, messages, stream, proxy = null, kwargs = {}) {
        if (!model) {
            model = 'gpt-3.5-turbo';
        } else if (!modelInfo[model]) {
            throw new Error(`Vercel does not support ${model}`);
        }

        const headers = {
            authority: 'sdk.vercel.ai',
            accept: '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'custom-encoding': await getAntiBotToken(),
            origin: 'https://sdk.vercel.ai',
            pragma: 'no-cache',
            referer: 'https://sdk.vercel.ai/',
            'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': randomUseragent.getRandom(),
        };

        const jsonData = {
            model: modelInfo[model].id,
            messages: messages,
            playgroundId: uuid.v4(),
            chatIndex: 0,
            ...modelInfo[model].default_params,
            ...kwargs,
        };

        // Retry on transient failures (e.g. a rejected anti-bot token).
        const maxRetries = kwargs.max_retries || 20;
        for (let i = 0; i < maxRetries; i++) {
            try {
                const response = await axios.post(
                    'https://sdk.vercel.ai/api/generate',
                    jsonData,
                    // axios expects `proxy` as an object like { host, port }, so pass it
                    // through as-is and disable proxying when none is given.
                    { headers, proxy: proxy || false, responseType: 'stream' }
                );

                // Relay the upstream text stream chunk by chunk.
                for await (const chunk of response.data) {
                    yield chunk.toString();
                }
                break;
            } catch (error) {
                if (i === maxRetries - 1) {
                    throw new Error(`Failed after ${maxRetries} retries: ${error.message}`);
                }
            }
        }
    }
}

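// OpenAI-style entry point: accepts { model, messages, stream, proxy, ...overrides }
// and relays the upstream text stream straight back to the client.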
app.post('/v1/chat/completions', async (req, res) => {
    const { model, messages, stream, proxy, ...kwargs } = req.body;

    try {
        const completion = Vercel.createCompletion(model, messages, stream, proxy, kwargs);

        // The upstream emits plain streamed text rather than OpenAI-style SSE frames,
        // so relay it as text/plain.
        res.writeHead(200, { 'Content-Type': 'text/plain; charset=utf-8' });

        for await (const token of completion) {
            res.write(token);
        }

        res.end();
    } catch (error) {
        if (!res.headersSent) {
            res.status(500).send({ error: error.message });
        }
    }
});

const PORT = process.env.PORT || 3040;
app.listen(PORT, () => {
    console.log(`Server is running on port ${PORT}`);
});
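
For those asking upthread how to use it: once the server is running, just POST to it like an OpenAI endpoint. A minimal client sketch, assuming the proxy above is listening locally on its default port 3040 (Node 18+ for the built-in fetch):

(async () => {
    const res = await fetch('http://localhost:3040/v1/chat/completions', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            model: 'gpt-3.5-turbo',
            messages: [{ role: 'user', content: 'Hello!' }],
        }),
    });

    // The proxy streams plain text, so print each chunk as it arrives.
    for await (const chunk of res.body) {
        process.stdout.write(Buffer.from(chunk));
    }
})();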