让所有chat模型支持代码补全

补全效果 玩得开心

from flask import Flask, request, Response, stream_with_context
import json
import time
import uuid
app = Flask(__name__)
# logging.basicConfig(level=logging.DEBUG)

import Agently

# Agent factory configured against an OpenAI-compatible chat endpoint
# (model name and base URL come from the settings below).
# NOTE(review): the API key and internal URL are hard-coded — move them to
# environment variables / a config file before sharing or deploying.
agent_factory = (
    Agently.AgentFactory()
    .set_settings("current_model", "OAIClient")
    .set_settings("model.OAIClient.auth", {"api_key": "sk-UwGrkEwoeCAwf6Bc3d10C21aCaD34434A23d426a9aB11677"})
    .set_settings("model.OAIClient.options", {"model": "Doubao-lite-4k"})
    .set_settings("model.OAIClient.url", "http://192.168.100.1:18300/v1")
)

# One shared agent instance, reused for every incoming request.
agent = agent_factory.create_agent()

def complete_code(djson):
    """Ask the chat agent to produce a code completion.

    Parameters
    ----------
    djson : dict
        An OpenAI-style ``/completions`` request body. Recognized keys:
        ``prompt``, ``suffix``, ``max_tokens``, ``temperature``, ``top_p``,
        ``stop``. Missing optional keys fall back to sensible defaults
        instead of raising ``KeyError`` (clients frequently omit them).

    Returns
    -------
    list[str]
        A single-element list holding the completion text; ``[""]`` when
        the agent response has no usable ``"completion"`` field.
    """
    prompt = djson.get('prompt', '')
    suffix = djson.get('suffix', '')
    max_tokens = djson.get('max_tokens', 500)
    temperature = djson.get('temperature', 0.2)
    top_p = djson.get('top_p', 1)
    stop = djson.get('stop', [])

    result = (
        agent
        .general("""您正在协助完成代码。以下是主要指导原则:
{{input}} 中 # Path: 是文件的路径,请检查后缀名确定编程语言
stop: 这是一个包含停止符的列表,用来告诉模型在生成到这些符号时停止。
suffix: 这是给模型的输入后缀,通常用来提供代码的后续部分,以帮助模型更好地理解上下文。
completion输出内容应像直接写入代码编辑器一样。
completion输出内容可以是代码、注释或字符串。
completion不要提供现有的和重复的代码。
如果提供的前缀和后缀包含不完整的代码或语句,响应内容应该能直接连接到提供的前缀和后缀。
我可能会告诉你我想在注释中写什么,你需要根据这些指示提供内容。
completion始终提供非空的输出。""")
        .info("suffix", suffix)
        .info("stop", stop)
        .input(prompt)
        .set_settings("model.OAIClient.options.temperature", temperature)
        .set_settings("model.OAIClient.options.top_p", top_p)
        .set_settings("model.OAIClient.options.max_tokens", max_tokens)
        .output({
            "completion": ("str", "Code Completion Results"),
        })
        .start()
    )
    try:
        return [result["completion"]]
    except (TypeError, KeyError):
        # The agent may return None or a dict without "completion";
        # degrade to an empty completion instead of crashing the request.
        return [""]

def out_chat(chat_response, original_model):
    """Wrap completion strings in an OpenAI ``text_completion`` payload.

    Parameters
    ----------
    chat_response : list[str]
        Completion texts, one per choice.
    original_model : str
        Model name echoed back in the response.

    Returns
    -------
    dict
        An OpenAI-compatible completion response with a fresh UUID id,
        the current timestamp, and zeroed token usage counters.
    """
    choices = [
        {"index": idx, "text": text}
        for idx, text in enumerate(chat_response)
    ]

    return {
        "id": str(uuid.uuid4()),
        "object": "text_completion",
        "created": int(time.time()),
        "model": original_model,
        "choices": choices,
        "usage": {'completion_tokens': 0, 'prompt_tokens': 0, 'total_tokens': 0},
    }



@app.route('/', defaults={'path': ''}, methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH'])
@app.route('/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH'])
def proxy(path):
    """Catch-all route that answers OpenAI-style completion requests.

    Only ``POST`` requests whose path contains ``completions`` are served;
    everything else gets a 400. Supports both plain JSON and SSE streaming
    responses depending on the request's ``stream`` flag.
    """
    # Guard clause: reject anything that is not a POST to a completions path.
    if request.method != 'POST' or 'completions' not in path:
        return "This proxy only supports POST requests to completions endpoints", 400

    completion_request = request.json
    print("得到数据",completion_request)

    completion = complete_code(completion_request)
    print("得到回答",completion)

    model_name = completion_request.get('model', 'gpt-4o-mini')
    completion_response = out_chat(completion, model_name)

    if not completion_request.get('stream', False):
        return json.dumps(completion_response), 200, {'Content-Type': 'application/json'}

    def generate():
        # Emit the whole payload as one SSE chunk, then the [DONE] terminator.
        yield f"data: {json.dumps(completion_response)}\n\n"
        yield "data: [DONE]\n\n"

    return Response(stream_with_context(generate()), content_type='text/event-stream')


if __name__ == '__main__':
    # Listen on all interfaces on port 5001 — matches the `codex_api_base`
    # (http://127.0.0.1:5001/v1) in the override config shown below.
    app.run(debug=False, host='0.0.0.0', port=5001)

override 的配置

{
  "bind": "0.0.0.0:8181",
  "proxy_url": "",
  "timeout": 600,
  "codex_api_base": "http://127.0.0.1:5001/v1",
  "codex_api_key": "sk-",
  "codex_api_organization": "",
  "codex_api_project": "",
  "codex_max_tokens": 500,
  "code_instruct_model": "deepseek-coder",
  "chat_api_base": "https://api.deepseek.com/v1",
  "chat_api_key": "sk-",
  "chat_api_organization": "",
  "chat_api_project": "",
  "chat_max_tokens": 4096,
  "chat_model_default": "deepseek-chat",
  "chat_model_map": {
  },
  "chat_locale": "zh_CN",
  "auth_token": ""
}
110 个赞

前排

2 个赞

感谢分享

2 个赞

copilot, #override添加

强,感谢分享

2 个赞

现在支持哪些 api和他们的模型?

4 个赞

支持openai接口的都可以试试

1 个赞

太强了!

2 个赞

啊?

@duolabmeng6 试了下的确可以 不过就像老母猪带胸罩,一套又一套,调来调去补全代码太慢了。

2 个赞

强得一匹

很厉害,但延迟很高,没有专用代码补全api的快

1 个赞

感谢分享。

豆包的返回速度感觉还可以接受

大概延迟有多少毫秒?

这话术有水平的

1 个赞

收藏

[GIN] 2024/08/27 - 10:53:33 | 408 | 659.7595ms | 127.0.0.1 | POST "/v1/engines/copilot-codex/completions"

佬,为什么代码补全时调用的/v1/engines/copilot-codex/completions端点一直返回408?

2 个赞

很强大,很好用

如果是谷歌的flash速度飞快 秒出

2 个赞