librechat真的快呀

配置确实是麻烦,但是响应真的快,打算找个时间都给它改成host模式部署上去 :tieba_022:

5 个赞

似乎artifacts会有错误?

2 个赞

html代码预览吗?我这正常

1 个赞

有什么额外的配置吗

心动了,有什么简易部署方法吗?

是无法预览吗?设置-实验特性,有个开关

1 个赞

官网很简单了,不想映射就把librechat镜像的端口加上127.0.0.1然后反代

缺点是模型管理不方便,要预设,优点就是响应快,在我这比openwebui快

1 个赞

要预设就很烦了

确实很难搞,虽然可以自定义端点和key,但是它不拉模型,我在想能不能自己改了

暂时自用倒也够了,主要是受不了owu,响应真慢,和api速度无关,就是代码烂

主力已经是lobechat db版了,唯一就是手机上没那么流畅,感觉hivechat也不错

1 个赞

已经从librechat换到lobechat-db了

1 个赞

过去用的配置文件,应该是非常全了,甚至还有YOU的逆向

# LibreChat config schema version (parses as the string "1.0.6").
version: 1.0.6
# Enable server-side caching.
cache: true
# Speech-to-text / text-to-speech configuration.
speech:
  stt:
    # OpenAI-compatible transcription endpoint served by a local proxy.
    openai:
      url: 'http://192.168.31.25:30000/v1/audio/transcriptions'
      apiKey: '${LibreChat_API_KEY}'
      model: 'whisper-large-v3-turbo'
  tts:
    # TTS goes directly to SiliconFlow (CosyVoice2 with a custom voice).
    # NOTE(review): key is hard-coded here rather than referenced via an
    # environment variable like the STT key — consider moving it to the env.
    openai:
      apiKey: 'sk-XXXXXXXXXXXXXXXXXXXXXXXXX'
      model: 'FunAudioLLM/CosyVoice2-0.5B'
      voices: ['speech:soft_girl:XXXXXXXXXXXXXXXXXXXXXX']
      url: "https://api.siliconflow.cn/v1/audio/speech"

  # Defaults for the client-side Speech settings tab.
  speechTab:
    conversationMode: true
    advancedMode: false
    speechToText:
      engineSTT: "external"
      languageSTT: "Mandarin Chinese"
      autoTranscribeAudio: true
      # Input below -45 dB is treated as silence.
      decibelValue: -45
      # 0 = do not auto-send transcribed text.
      autoSendText: 0
    textToSpeech:
      engineTTS: "external"
      voice: "onyx"
      languageTTS: "zh-cn"
      automaticPlayback: false
      playbackRate: 1.0
      cacheTTS: true
#  stderr: inherit


#interface:
#  endpointsMenu: true
#  modelSelect: true
#  parameters: true
#  sidePanel: true
#  presets: true
#  prompts: true
#  bookmarks: true
#  multiConvo: true
#  agents: true

endpoints:
  # Azure OpenAI: one resource group mapping model names to deployment names.
  azureOpenAI:
    titleModel: "gpt-4o-mini"
    assistants: false
    plugins: false
    groups:
    - group: "XXXXXXXXXXXXXXXXXXXXXXXX"
      apiKey: "XXXXXXXXXXXXXXXXXXXXXXXXXX"
      instanceName: "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
      # Azure OpenAI REST API version.
      version: "2024-12-01-preview"
      # Keys are the model names shown in LibreChat; deploymentName is the
      # Azure deployment each one routes to.
      models:
        gpt-4o-mini:
          deploymentName: "gpt-4o-mini"
        gpt-4o:
          deploymentName: "gpt-4o"
        gpt-4-turbo:
          deploymentName: "gpt-4-turbo"
        o1-mini:
          deploymentName: "o1-mini"
        o1:
          deploymentName: "o1"
        o3-mini:
          deploymentName: "o3-mini"
#    - group: "o1-mini-o"
#      apiKey: "${LibreChat_API_KEY}"
#      serverless: true
#      baseURL: "http://192.168.31.25:30000/v1"
#      additionalHeaders:
#        api-key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#      models:
#        o1-mini-o:
#          deploymentName: "o1-mini"
  custom:
    # Claude models served through the local OpenAI-compatible proxy.
    - name: "Claude"
      apiKey: "${LibreChat_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/Claude-2.svg"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "claude-3-haiku-20240307",
          "claude-3-sonnet-20240229",
          "claude-3-opus-20240229",
          "claude-3-5-sonnet-20240620",
          "claude-3-5-haiku-20241022",
          "claude-3-5-sonnet-20241022",
          "claude-3-7-sonnet-20250219",
          "claude-3-7-sonnet-20250219-thinking",
          ]
        # "fetch" belongs under "models:" in the LibreChat schema; it was
        # previously at the endpoint level, where it is ignored.
        fetch: false
      titleConvo: true
      titleModel: "claude-3-5-haiku-20241022"
      summarize: false
      summaryModel: "claude-3-5-haiku-20241022"
      forcePrompt: false
      modelDisplayLabel: "Claude"

    # DeepSeek models via the local proxy. (Cleaned trailing whitespace.)
    - name: "Deepseek"
      apiKey: "${DEEPSEEK_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/deepseek2.png"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "deepseek-chat",
          "deepseek-reasoner",
          ]
        fetch: false
      titleConvo: true
      titleModel: "deepseek-chat"
      summarize: false
      summaryModel: "deepseek-chat"
      forcePrompt: false
      modelDisplayLabel: "Deepseek"

    # SiliconFlow model catalog via the local proxy. (Cleaned trailing
    # whitespace flagged by yamllint.)
    - name: "Siliconflow"
      apiKey: "${Siliconflow_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/Siliconflow.webp"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "01-ai/Yi-1.5-34B-Chat-16K",
          "01-ai/Yi-1.5-6B-Chat",
          "01-ai/Yi-1.5-9B-Chat-16K",
          "AIDC-AI/Marco-o1",
          "OpenGVLab/InternVL2-26B",
          "Qwen/QVQ-72B-Preview",
          "Qwen/QwQ-32B-Preview",
          "Qwen/QwQ-32B",
          "Qwen/Qwen2-1.5B-Instruct",
          "Qwen/Qwen2-7B-Instruct",
          "Qwen/Qwen2-VL-72B-Instruct",
          "Qwen/Qwen2.5-14B-Instruct",
          "Qwen/Qwen2.5-32B-Instruct",
          "Qwen/Qwen2.5-72B-Instruct",
          "Qwen/Qwen2.5-72B-Instruct-128K",
          "Qwen/Qwen2.5-7B-Instruct",
          "Qwen/Qwen2.5-Coder-32B-Instruct",
          "Qwen/Qwen2.5-Coder-7B-Instruct",
          "THUDM/chatglm3-6b",
          "THUDM/glm-4-9b-chat",
          "TeleAI/TeleChat2",
          "deepseek-ai/DeepSeek-R1",
          "deepseek-ai/DeepSeek-V3",
          "deepseek-ai/DeepSeek-V2.5",
          "deepseek-ai/deepseek-v2",
          "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
          "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
          "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
          "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
          "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
          "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
          "genmo/mochi-1-preview",
          "google/gemma-2-27b-it",
          "google/gemma-2-9b-it",
          "internlm/internlm2_5-20b-chat",
          "internlm/internlm2_5-7b-chat",
          "meta-llama/Llama-3.3-70B-Instruct",
          "meta-llama/Meta-Llama-3.1-405B-Instruct",
          "meta-llama/Meta-Llama-3.1-70B-Instruct",
          "meta-llama/Meta-Llama-3.1-8B-Instruct",
          ]
        fetch: false
      titleConvo: true
      titleModel: "Qwen/Qwen2.5-72B-Instruct-128K"
      summarize: false
      summaryModel: "Qwen/Qwen2.5-72B-Instruct-128K"
      forcePrompt: false
      modelDisplayLabel: "Siliconflow"

    # Zhipu GLM models. (Cleaned trailing whitespace.)
    - name: "ZhipuAI"
      apiKey: "${LibreChat_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/icon/main/glm.png"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "glm-4-flash",
          "glm-4-flashx",
          "glm-4-air",
          "glm-4-airx",
          "glm-4-long",
          "glm-4",
          "glm-4-0520",
          "glm-4-plus",
          "glm-4-alltools",
          "glm-4v",
          "glm-4v-flash",
          "glm-4v-plus",
          "glm-zero-preview",
          ]
        fetch: false
      titleConvo: true
      # NOTE(review): titles/summaries use gpt-4o through the same proxy,
      # not a GLM model — confirm this is intentional.
      titleModel: "gpt-4o"
      summarize: false
      summaryModel: "gpt-4o"
      forcePrompt: false
      modelDisplayLabel: "ZhipuAI"
      dropParams: ["stop", "user", "presence_penalty", "frequency_penalty"]

    # Alibaba Tongyi Qianwen (Qwen commercial API). (Cleaned trailing
    # whitespace.)
    - name: "通义千问"
      apiKey: "${Ali_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/通义千问2.svg"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "qwen-turbo",
          "qwen-plus",
          "qwen-max",
          "qwen-vl-plus",
          "qwen-vl-max",
          ]
        fetch: false
      titleConvo: true
      titleModel: "qwen-turbo"
      summarize: false
      summaryModel: "qwen-turbo"
      forcePrompt: false
      modelDisplayLabel: "通义千问"
#      dropParams: ["stop", "user", "presence_penalty", "frequency_penalty"]

    # 01.AI (Yi) models.
    - name: "01.AI"
      # Renamed from "${01.AI_API_KEY}": environment variable names cannot
      # start with a digit or contain ".", so the old reference could never
      # resolve. Export YI_API_KEY in the environment / .env file.
      apiKey: "${YI_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/YI-3.png"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "yi-lightning",
          "yi-large",
          "yi-large-fc",
          "yi-medium",
          "yi-vision",
          "yi-vision-solution",
          "yi-vision-v2",
          "yi-medium-200k",
          "yi-spark",
          "yi-large-preview",
          ]
        fetch: false
      titleConvo: true
      titleModel: "yi-lightning"
      summarize: false
      summaryModel: "yi-lightning"
      forcePrompt: false
      modelDisplayLabel: "01.AI"
#      max_tokens: 4096

    # Metaso search "modes" exposed as models.
    - name: "Metaso"
      apiKey: "${LibreChat_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/metaso.svg"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "concise",
          "detail",
          "research",
          ]
        # "fetch" belongs under "models:" in the LibreChat schema; it was
        # previously at the endpoint level, where it is ignored.
        fetch: false
      titleConvo: true
      titleModel: "gpt-4o"
      summarize: false
      summaryModel: "gpt-4o"
      forcePrompt: false
      modelDisplayLabel: "Metaso"
      dropParams: ["temperature", "stop", "user", "presence_penalty", "frequency_penalty", "top_p"]



    # Moonshot / Kimi models. (Cleaned trailing whitespace.)
    - name: "Moonshot AI"
      apiKey: "${LibreChat_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/Moonshot%20Al.png"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "kimi",
          "kimi-search",
          "kimi-research",
          "kimi-math",
          "moonshot-v1-auto",
          "moonshot-v1-8k",
          "moonshot-v1-32k",
          "moonshot-v1-128k",
          "moonshot-v1-8k-vision-preview",
          "moonshot-v1-32k-vision-preview",
          "moonshot-v1-128k-vision-preview",
          ]
        fetch: false
      titleConvo: true
      titleModel: "gpt-4o"
      summarize: false
      summaryModel: "gpt-4o"
      forcePrompt: false
      modelDisplayLabel: "Moonshot AI"
#      dropParams: [ "stop", "user", "presence_penalty", "frequency_penalty"]

    # ByteDance Doubao models. (Removed a literal TAB character after
    # "Doubao-lite-32k" and trailing whitespace.)
    - name: "Doubao AI"
      apiKey: "${Doubao_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/doubao.png"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "Doubao-lite-4k",
          "Doubao-lite-32k",
          "Doubao-lite-128k",
          "Doubao-1.5-lite-32k",
          "Doubao-pro-32k",
          "Doubao-pro-128k",
          "Doubao-pro-256k",
          "Doubao-1.5-pro-32k",
          "Doubao-1.5-pro-256k",
          "Doubao-pro-32k-browsing",
          "Doubao-1.5-vision-pro-32k",
          "DeepSeek-V3",
          "DeepSeek-R1",
        ]
        fetch: false
      titleConvo: true
      titleModel: "Doubao-1.5-pro-32k"
      summarize: false
      summaryModel: "Doubao-1.5-pro-32k"
      modelDisplayLabel: "Doubao"
#      dropParams: ["stop", "user", "presence_penalty", "frequency_penalty"]

    # xAI Grok models, routed through the local gateway.
    - name: "xAI"
      apiKey: "${xAI_API_KEY}"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        fetch: false
        default:
          - "grok-beta"
          - "grok-vision-beta"
          - "grok-2-1212"
          - "grok-3"
          - "grok-3-search"
          - "grok-3-reasoning"
          - "grok-3-deepsearch"
      titleConvo: true
      titleMethod: "completion"
      titleModel: "grok-2-1212"
      summarize: false
      summaryModel: "grok-2-1212"
      forcePrompt: false
      modelDisplayLabel: "Grok"

    # YOU.com reverse-proxied models.
    - name: "YOU AI"
      apiKey: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXX"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/you.png"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "claude-3-sonnet",
          "claude-3-opus",
          "claude-3.5-haiku",
          "claude-3.5-sonnet",
          "claude-3-7-sonnet",
          "claude-3-7-sonnet-think",
          "command-r-plus",
          "deepseek-chat",
          "deepseek-reasoner",
          "gemini-1.5-flash",
          "gemini-1.5-pro",
          "gpt-4-turbo",
          "gpt-4o-mini",
          "gpt-4o",
          "gpt-4.5-preview",
          "llama-3.1-405b",
          "llama-3.2-90b",
          "mistral-large-2",
          "o1-mini",
          "o1",
          "o3-mini-medium",
          "o3-mini-high",
          "qwen-2.5-72b",
          "qwen-2.5-coder-32b",
        ]
        # "fetch" belongs under "models:" in the LibreChat schema; it was
        # previously at the endpoint level, where it is ignored.
        fetch: true
      titleConvo: true
      titleModel: "claude-3.5-sonnet"
      summarize: false
      summaryModel: "claude-3.5-sonnet"
      forcePrompt: false
      modelDisplayLabel: "YOU"

    # Genspark reverse-proxied models.
    - name: "Genspark AI"
      apiKey: "sk-XXXXXXXXXXXXXXXXXXXXX#42"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/genspark.ico"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "claude-3-5-haiku",
          "claude-3-7-sonnet",
          "claude-3-7-sonnet-thinking",
          "deep-seek-v3",
          "deep-seek-r1",
          "gemini-2.0-flash",
          "gpt-4o",
          "o1",
          "o3-mini-high",
        ]
        # "fetch" belongs under "models:" in the LibreChat schema; it was
        # previously at the endpoint level, where it is ignored.
        fetch: false
      titleConvo: true
      titleModel: "claude-3-7-sonnet"
      summarize: false
      summaryModel: "claude-3-7-sonnet"
      forcePrompt: false
      modelDisplayLabel: "Genspark"

    # StepFun models. (Removed a literal TAB character after "step-1-8k"
    # and trailing whitespace.)
    - name: "Step AI"
      apiKey: "${LibreChat_API_KEY}"
      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/refs/heads/main/step.png"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        default: [
          "step-1-flash",
          "step-1-8k",
          "step-1-32k",
          "step-1-128k",
          "step-1-256k",
          "step-2-16k",
          "step-1v-8k",
          "step-1v-32k",
          "step-1.5v-turbo",
          "step-1x-medium",
        ]
        fetch: false
      titleConvo: true
      titleModel: "gpt-4o"
      summarize: false
      summaryModel: "gpt-4o"
      forcePrompt: false
      modelDisplayLabel: "Step AI"
#      dropParams: ["stop", "user", "presence_penalty", "frequency_penalty"]
    

#    - name: "Meta AI"
#      apiKey: "${LibreChat_API_KEY}"
#      iconURL: "https://raw.gitmirror.com/Passerby1011/Garbage-dump/831e1e82455c2475aecedb9f2a2439e8226ec742/Llama2.svg"
#      baseURL: "http://192.168.31.25:30000/v1"
#      models:
#        default: [	
#          "Meta-Llama-3.1-8B-Instruct",
#          "Meta-Llama-3.1-8B-Instruct-Pro",
#          "Meta-Llama-3.1-70B-Instruct",
#          "Meta-Llama-3.1-405B-Instruct",
#          "Meta-Llama-3.3-70B-Instruct",
#        ]
#        fetch: false
#      titleConvo: true
#      titleModel: "gpt-4o" 
#      summarize: false
#      summaryModel: "gpt-4o" 
#      forcePrompt: false
#      modelDisplayLabel: "Meta AI"

    # Mistral AI; fetch: true refreshes the list from the upstream /models
    # endpoint at runtime.
    - name: "Mistral"
      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "http://192.168.31.25:30000/v1"
      models:
        fetch: true
        default:
          - "mistral-small-latest"
          - "mistral-medium-latest"
          - "mistral-large-latest"
      titleConvo: true
      titleModel: "mistral-medium-latest"
      modelDisplayLabel: "Mistral"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Cohere Command models behind a Cloudflare AI Gateway.
    - name: "cohere"
      apiKey: "${COHERE_API_KEY}"
      baseURL: "https://gateway.ai.cloudflare.com/v1/XXXXXXXXXXXXXXXXXXXXX6/oXXXXXXXXXXXXXpi/cohere/v1"
      models:
        fetch: false
        default:
          - "command-r"
          - "command-r-plus"
          - "command-light"
          - "command-light-nightly"
          - "command"
          - "command-nightly"
      modelDisplayLabel: "cohere"
      titleModel: "command"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p"]

    # Groq models via a personal relay.
    - name: "groq"
      apiKey: "${GROQ_API_KEY}"
      baseURL: "https://XXXXs.erSADSAySDSDj.cSADSAub/groq/v1"
      models:
        fetch: false
        default:
          - "llama-guard-3-8b"
          - "mistral-saba-24b"
          - "llama-3.2-11b-vision-preview"
          - "gemma2-9b-it"
          - "llama-3.1-8b-instant"
          - "llama-3.2-1b-preview"
          - "llama3-8b-8192"
          - "llama-3.2-3b-preview"
          - "llama3-70b-8192"
          - "deepseek-r1-distill-llama-70b"
          - "llama-3.2-90b-vision-preview"
          - "deepseek-r1-distill-qwen-32b"
          - "mixtral-8x7b-32768"
          - "llama-3.3-70b-versatile"
          - "qwen-2.5-coder-32b"
          - "llama-3.3-70b-specdec"
          - "qwen-2.5-32b"
          - "qwen-qwq-32b"
      titleConvo: true
      titleModel: "qwen-2.5-32b"
      modelDisplayLabel: "groq"

    # OpenRouter behind a Cloudflare AI Gateway; fetch: true pulls the full
    # model list at runtime. (Cleaned trailing whitespace.)
    - name: "OpenRouter"
      apiKey: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
      baseURL: "https://gateway.ai.cloudflare.com/v1/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXX/openrouter/v1"
      models:
        default: [
          "mistralai/mixtral-8x7b-instruct",
          ]
        fetch: true
      titleConvo: true
      titleModel: "deepseek/deepseek-chat"
      summarize: false
      summaryModel: "deepseek/deepseek-chat"
      forcePrompt: false
      modelDisplayLabel: "OpenRouter"
3 个赞

lobe今年挺猛的,我在等他家客户端,哈哈

1 个赞

lobechat有点慢吧?

年初改过一版了,整体加载速度上来了

2 个赞